# agents/app.py — Hugging Face Space by Merlintxu (rev b10b127, 1.98 kB)
import os
import gradio as gr
from langchain.agents import Agent
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
# Hugging Face API token, read from the environment.
# NOTE(review): the fallback is a placeholder string, so a missing env var
# silently produces an invalid "Authorization: Bearer your_huggingface_..."
# header instead of failing fast — consider raising when unset.
HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN', 'your_huggingface_api_token_here')
# LLM backed by the Hugging Face Inference API (gpt2 endpoint); the token is
# passed explicitly via the request headers rather than the client config.
llm = HuggingFaceEndpoint(endpoint_url="https://api-inference.huggingface.co/models/gpt2",
headers={"Authorization": f"Bearer {HUGGINGFACEHUB_API_TOKEN}"})
# Sentence-transformer embeddings used to build the vector store below.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# Vector database (FAISS).
# NOTE(review): this positional call likely does not match the langchain FAISS
# constructor (which expects embedding_function, index, docstore,
# index_to_docstore_id) and may raise a TypeError at import time — verify.
# Also, `vectorstore` is never used in the rest of this file.
vectorstore = FAISS(embeddings.embed_query, embeddings.embed_documents)
# Define the agents with distinct roles
class ResearchAgent(Agent):
    """Agent role that asks the LLM for an in-depth, detailed answer."""

    def run(self, query):
        # Append the role-specific instruction to the raw user query.
        prompt = query + " Please provide a detailed explanation."
        return llm(prompt)
class SummaryAgent(Agent):
    """Agent role that asks the LLM for a short summary of the query topic."""

    def run(self, query):
        # Same contract as the other agents: query in, LLM completion out.
        prompt = query + " Summarize the information briefly."
        return llm(prompt)
class QAAgent(Agent):
    """Agent role that asks the LLM to answer the user's question directly."""

    def run(self, query):
        # Bug fix: the original concatenated `query` twice
        # ("<query> Answer the following question: <query>"), duplicating the
        # user's text in the prompt. Send the instruction plus the query once.
        return llm("Answer the following question: " + query)
# Create instances of the agents
# One module-level singleton per role, shared across all Gradio requests.
research_agent = ResearchAgent()
summary_agent = SummaryAgent()
qa_agent = QAAgent()
def agent_interaction(query, agent_type):
    """Route *query* to the agent selected in the UI.

    Args:
        query: Free-form user text, forwarded unchanged to the chosen agent.
        agent_type: One of "Research", "Summary" or "Q&A" (the Radio choices).

    Returns:
        The chosen agent's response string, or an explicit error message for
        an unrecognized agent_type. (The original if/elif chain fell through
        and returned None, which rendered as empty output in Gradio.)
    """
    # Dispatch table keeps the routing in one place, in sync with the Radio choices.
    dispatch = {
        "Research": research_agent,
        "Summary": summary_agent,
        "Q&A": qa_agent,
    }
    agent = dispatch.get(agent_type)
    if agent is None:
        return f"Unknown agent type: {agent_type!r}"
    return agent.run(query)
# Gradio UI: a query textbox plus a radio selector choosing the agent role.
# Fix: the gr.inputs.* namespace was deprecated in Gradio 2.x and removed in
# Gradio 3/4 — the top-level component classes (gr.Textbox, gr.Radio) are the
# supported equivalents with the same arguments.
interface = gr.Interface(
    fn=agent_interaction,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your query here..."),
        gr.Radio(["Research", "Summary", "Q&A"], label="Agent Type"),
    ],
    outputs="text",
)

if __name__ == "__main__":
    interface.launch()