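# Chainlit app for chatting about "Alice in Wonderland": a LangChain
# ZeroShotAgent answers questions with a Chroma-backed RetrievalQA chain and
# falls back to a SerpAPI web search when the book index can't help.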
from langchain.agents import Tool, ZeroShotAgent, AgentExecutor
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain import SerpAPIWrapper, LLMChain
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import Chroma

import chainlit as cl
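

# The Chainlit factory is invoked when a new chat session starts and must
# return the chain (here, the agent executor) that handles each user message.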
@cl.langchain_factory(use_async=True)
async def init():
    # Set the OpenAI Embeddings model
    embeddings = OpenAIEmbeddings()

    # Set the persist directory
    persist_directory = "vector_db"

    # Load the persisted Chroma vector store
    vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)

    # Create a chain that uses the Chroma vector store
    alice_qa = RetrievalQA.from_chain_type(
        ChatOpenAI(
            model_name="gpt-3.5-turbo-16k",
            temperature=0,
        ),
        chain_type="stuff",
        retriever=vectordb.as_retriever(),
    )
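
    # Web search fallback (requires SERPAPI_API_KEY) and conversation memory
    # so the agent can carry {chat_history} across turns.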
    search = SerpAPIWrapper()
    memory = ConversationBufferMemory(memory_key="chat_history")
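
    # The QA system is the primary tool; the Google search tool is described as
    # a backup so the agent only reaches for it when retrieval can't answer.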
    tools = [
        Tool(
            name="Alice in Wonderland QA System",
            func=alice_qa.run,
            description="useful for when you need to answer questions about Alice in Wonderland. Input should be a fully formed question.",
        ),
        Tool(
            name="Backup Alice Google Search",
            func=search.run,
            description="useful for when you need to answer questions about Alice in Wonderland but only when the Alice in Wonderland QA System couldn't answer the query. Input should be a fully formed question.",
        ),
    ]
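
    # Assemble the ZeroShotAgent prompt: the prefix introduces the tools, and
    # the suffix injects the running {chat_history}, the user's {input}, and
    # the {agent_scratchpad} where intermediate tool calls are recorded.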
    prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
    suffix = """Begin!

{chat_history}
Question: {input}
{agent_scratchpad}"""

    prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=prefix,
        suffix=suffix,
        input_variables=["input", "chat_history", "agent_scratchpad"],
    )
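
    # The LLMChain pairs the chat model with the agent prompt built above.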
    llm_chain = LLMChain(
        llm=ChatOpenAI(
            model_name="gpt-3.5-turbo-16k",
            temperature=0,
        ),
        prompt=prompt,
    )
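
    # Wrap the LLMChain in a ZeroShotAgent and hand it to an AgentExecutor,
    # which runs the reason/act loop and shares the conversation memory.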
    agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
    agent_chain = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, verbose=True, memory=memory
    )

    # Let the user know that the system is ready
    await cl.Message(
        content="You can begin by asking any questions about Alice in Wonderland!"
    ).send()

    return agent_chain
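

# Called for every user message: the agent returned by the factory is invoked
# in a worker thread via cl.make_async so the event loop isn't blocked, and the
# LangChain callback handler streams intermediate steps to the Chainlit UI.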
@cl.langchain_run
async def run(agent, input_str):
    res = await cl.make_async(agent)(input_str, callbacks=[cl.LangchainCallbackHandler()])
    print(res)
    await cl.Message(content=res["output"]).send()
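

# Rename the chain in the Chainlit UI so replies appear to come from
# "The Mad Hatter" instead of the generic "LLMChain" label.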
@cl.langchain_rename
def rename(original_llm_chain: str):
    rename_dict = {
        "LLMChain": "The Mad Hatter 🤪🎩",
    }
    return rename_dict.get(original_llm_chain, original_llm_chain)