# Hugging Face Space page header (scrape residue, kept as comments so the file parses):
# Pijush2023 — Update app.py — commit eac78c4 (verified)
import os
import gradio as gr
from langchain_redis import RedisConfig, RedisVectorStore
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_groq import ChatGroq
from langchain_community.embeddings import OpenAIEmbeddings
import logging
from huggingface_hub import login
# --- Environment / credential setup (module-level side effects) ---
hf_token = os.getenv("HF_TOKEN")
if hf_token is None:
    # Missing HF token is non-fatal: public Hub resources still work without login.
    print("Please set your Hugging Face token in the environment variables.")
else:
    login(token=hf_token)

logging.basicConfig(level=logging.DEBUG)

# Required API keys: fail fast with a clear message instead of a bare KeyError.
openai_api_key = os.environ.get("OPENAI_API_KEY")
groq_api_key = os.environ.get("GROQ_API_KEY")
if openai_api_key is None or groq_api_key is None:
    raise RuntimeError(
        "OPENAI_API_KEY and GROQ_API_KEY must both be set in the environment."
    )
# --- Redis vector-index configuration ---
# SECURITY: the original hard-coded a Redis password in this URL. Prefer the
# REDIS_URL environment variable; the literal below is kept only as a
# backward-compatible fallback and the credential should be rotated/removed.
REDIS_URL = os.getenv(
    "REDIS_URL",
    "redis://:KWq0uAoBYjBGErKvyMvexMqB9ep7v2Ct@redis-11044.c266.us-east-1-3.ec2.redns.redis-cloud.com:11044",
)
config = RedisConfig(
    index_name="radar_data_index",
    redis_url=REDIS_URL,
    # Filterable metadata fields stored alongside each vector.
    metadata_schema=[
        {"name": "category", "type": "tag"},
        {"name": "name", "type": "text"},
        {"name": "address", "type": "text"},
        {"name": "phone", "type": "text"},
    ],
)
# Embedding model used to vectorize queries; must match the model that built the index.
embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
# Redis-backed vector store over the pre-built "radar_data_index"
# (NOTE: these are OpenAI embeddings — the original comment said "Hugging Face" incorrectly).
vector_store = RedisVectorStore(embeddings, config=config)
# Top-5 similarity search over the stored vectors.
retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 5})
# Groq-hosted Llama 3.2 1B chat model generates the final answer.
llm = ChatGroq(model="llama-3.2-1b-preview")
# RAG prompt: Annie persona plus {question}/{context} slots filled by the chain.
# FIX: the original template opened with four quotes (`""""You’re`), which made
# a stray literal '"' the first character of every rendered prompt; removed.
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "human",
            """You’re Annie, a charming country music voicebot and media personality created by Amit Lamba, dedicated to guiding folks around Birmingham, Alabama.
Your style is warm, witty, and conversational—like chatting with a clever, friendly neighbor. Deliver advice that’s concise, accurate, and sprinkled with humor, usually in one or two sentences.
Focus on creating a delightful experience that feels like enjoying a slice of Southern hospitality, leaving users with a smile and encouraging follow-up questions to keep the conversation flowing smoothly—just like a perfect encore at a country music show.
Question: {question}
Context: {context}
Answer:""",
        ),
    ]
)
def format_docs(docs):
    """Join the page contents of *docs* with blank lines, for use as prompt context."""
    chunks = [doc.page_content for doc in docs]
    return "\n\n".join(chunks)
# RAG pipeline (LCEL): retrieve top-k docs -> join as context -> fill prompt -> LLM -> plain string.
rag_chain = (
    # "question" passes through unchanged; "context" is the retrieved docs joined by format_docs.
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
def rag_chain_response(messages, user_message):
    """Gradio handler: answer *user_message* with the RAG chain.

    Appends ``(user_message, response)`` to the chat history in place and
    returns the updated history plus an empty string to clear the input box.
    """
    # Guard: skip blank/whitespace-only submissions instead of invoking the LLM chain.
    if not user_message or not user_message.strip():
        return messages, ""
    # Generate a response using the RAG chain.
    response = rag_chain.invoke(user_message)
    # Append the turn to the chat history and clear the input box.
    messages.append((user_message, response))
    return messages, ""
# --- Gradio UI wiring ---
with gr.Blocks(theme="rawrsor1/Everforest") as app:
    # Chat history widget; tuple format [(user, bot), ...] matching rag_chain_response.
    chatbot = gr.Chatbot([], elem_id="RADAR", bubble_full_width=False)
    question_input = gr.Textbox(label="Ask a Question", placeholder="Type your question here...")
    submit_btn = gr.Button("Submit")
    # Set up interaction for both Enter key and Submit button (same handler, distinct api_name).
    question_input.submit(
        rag_chain_response,  # Function to handle input and generate response
        inputs=[chatbot, question_input],  # Pass current conversation state and user input
        outputs=[chatbot, question_input],  # Update conversation state and clear the input
        api_name="api_get_response_on_enter"
    )
    submit_btn.click(
        rag_chain_response,  # Function to handle input and generate response
        inputs=[chatbot, question_input],  # Pass current conversation state and user input
        outputs=[chatbot, question_input],  # Update conversation state and clear the input
        api_name="api_get_response_on_submit_button"
    )
# Launch the Gradio app (blocking; show_error surfaces handler exceptions in the UI).
app.launch(show_error=True)