|
import os |
|
import gradio as gr |
|
from langchain.agents import Agent |
|
from langchain_community.llms import HuggingFaceEndpoint |
|
from langchain_community.vectorstores import FAISS |
|
from langchain_community.embeddings import HuggingFaceEmbeddings |
|
|
|
|
|
# Hugging Face API token. The placeholder default will fail authentication —
# export a real HUGGINGFACEHUB_API_TOKEN in the environment before deploying.
HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN', 'your_huggingface_api_token_here')

# Shared LLM client: raw HTTP inference endpoint for the gpt2 model.
llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/gpt2",
    headers={"Authorization": f"Bearer {HUGGINGFACEHUB_API_TOKEN}"},
)

# Sentence-transformer embedding model backing the vector store.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# BUG FIX: FAISS.__init__ expects (embedding_function, index, docstore,
# index_to_docstore_id); the original call passed two bound methods and
# would fail at import time. Build the store through the documented
# factory instead (requires the `faiss` package at runtime).
# NOTE(review): `vectorstore` is not used anywhere in this file — confirm
# whether downstream code needs it before keeping the placeholder seed text.
vectorstore = FAISS.from_texts(["placeholder"], embeddings)
|
class ResearchAgent:
    """Agent that asks the LLM for a detailed explanation of the query.

    NOTE(review): the original subclassed ``langchain.agents.Agent``, a
    pydantic model with required fields (``llm_chain`` etc.), so the bare
    ``ResearchAgent()`` call below raised a validation error. None of that
    machinery is used here, so a plain class is the fix.
    """

    def run(self, query):
        """Return the LLM's detailed explanation for *query*."""
        # ``invoke`` is the current LangChain call interface; ``llm(...)``
        # (``__call__``) is deprecated.
        return llm.invoke(query + " Please provide a detailed explanation.")
|
|
|
class SummaryAgent:
    """Agent that asks the LLM for a brief summary of the query topic.

    NOTE(review): the original subclassed ``langchain.agents.Agent``, whose
    pydantic constructor requires fields (``llm_chain`` etc.) never supplied
    here, making ``SummaryAgent()`` raise. A plain class is the fix.
    """

    def run(self, query):
        """Return the LLM's brief summary for *query*."""
        # ``invoke`` is the current LangChain call interface; ``llm(...)``
        # (``__call__``) is deprecated.
        return llm.invoke(query + " Summarize the information briefly.")
|
|
|
class QAAgent:
    """Agent that asks the LLM to answer the user's question directly.

    NOTE(review): the original subclassed ``langchain.agents.Agent``, whose
    pydantic constructor requires fields never supplied here, making
    ``QAAgent()`` raise. A plain class is the fix.
    """

    def run(self, query):
        """Return the LLM's answer to *query*."""
        # BUG FIX: the original prompt was
        #   query + " Answer the following question: " + query
        # which sent the question twice; send it once after the instruction.
        return llm.invoke("Answer the following question: " + query)
|
|
|
|
|
# Singleton agent instances shared by the Gradio callback below.
# NOTE(review): with ``Agent`` as the base class these bare constructors
# appear to raise pydantic validation errors (required fields such as
# ``llm_chain`` are not supplied) — confirm against the installed
# langchain version.
research_agent = ResearchAgent()

summary_agent = SummaryAgent()

qa_agent = QAAgent()
|
|
|
|
|
def agent_interaction(query, agent_type):
    """Route *query* to the agent selected in the UI.

    Parameters
    ----------
    query : str
        The user's free-text input.
    agent_type : str
        One of ``"Research"``, ``"Summary"`` or ``"Q&A"`` (the Radio choices).

    Returns
    -------
    str
        The selected agent's response, or an explicit error message for an
        unrecognized *agent_type*. (The original fell through and returned
        ``None``, which Gradio rendered as empty output.)
    """
    if agent_type == "Research":
        return research_agent.run(query)
    if agent_type == "Summary":
        return summary_agent.run(query)
    if agent_type == "Q&A":
        return qa_agent.run(query)
    return f"Unknown agent type: {agent_type}"
|
|
|
|
|
# Gradio UI definition. BUG FIX: the ``gr.inputs`` namespace was removed in
# Gradio 3.x/4.x — the top-level component classes (gr.Textbox, gr.Radio)
# are the supported replacements.
interface = gr.Interface(
    fn=agent_interaction,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your query here..."),
        gr.Radio(["Research", "Summary", "Q&A"], label="Agent Type"),
    ],
    outputs="text",
)
|
|
|
if __name__ == "__main__":
    # Start the Gradio development server; blocks until interrupted.
    interface.launch()
|
|