Spaces:
Running
Running
File size: 2,161 Bytes
d165b85 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
import os
from tools import FindResearchDirectionsTool, JudgeNoveltyTool, FindReferencesTool
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from langchain.agents import AgentType
import openai
from langchain.schema import SystemMessage
from langchain.memory import ConversationBufferMemory
# Configure the OpenAI client from the environment.
openai.api_key = os.getenv("OPENAI_API_KEY")

# SECURITY: never print the raw API key — it would leak into console logs
# and any captured stdout. Log only whether a key is present.
print("OPENAI_API_KEY set:", openai.api_key is not None)

# Model selection, overridable via the DEFAULT_MODEL env var;
# os.getenv's second argument supplies the fallback when the var is unset.
default_model = os.getenv("DEFAULT_MODEL", "gpt-3.5-turbo-16k")
import chainlit as cl
# Keyword arguments passed through to initialize_agent: a system prompt that
# frames the assistant as a research advisor.
# Fix: the original adjacent string literals concatenated with no separator,
# so the numbered items ran together ("...including:1. Search related
# references.2. Propose..."); newlines restore the intended list formatting.
# Also fixed prompt grammar ("You always response" -> "respond").
agent_kwargs = {
    "system_message": SystemMessage(content=(
        "You are a mighty cyber professor. "
        "Your task is to assist your student to find an idea of research including:\n"
        "1. Search related references.\n"
        "2. Propose potential research directions.\n"
        "3. Evaluate the novelty of any research direction.\n"
        "Follow the following instructions:\n"
        "1. You always respond in the same language as your student.\n"
        "2. Ask your student for further information if necessary to provide more assistance."
    ))
}

# Conversation memory shared by the agent across turns; return_messages=True
# yields message objects (not a flat string), as chat agents expect.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
@cl.langchain_factory(use_async=False)
def main():
    """Build and return the LangChain agent used by Chainlit.

    Wires the three research-assistant tools to a streaming ChatOpenAI
    model and wraps everything in an OpenAI-functions agent that shares
    the module-level system prompt and conversation memory.
    """
    research_tools = [
        FindResearchDirectionsTool(),
        JudgeNoveltyTool(),
        FindReferencesTool(),
    ]
    chat_model = ChatOpenAI(temperature=0.9, model=default_model, streaming=True)
    return initialize_agent(
        research_tools,
        chat_model,
        agent=AgentType.OPENAI_FUNCTIONS,
        verbose=True,
        agent_kwargs=agent_kwargs,
        memory=memory,
    )
@cl.langchain_run
async def run(agent, input_str):
    """Run one user turn through the agent and send the reply to the UI.

    The synchronous agent call is wrapped with cl.make_async so it does
    not block the event loop; the Chainlit callback handler streams
    intermediate agent steps to the chat window.
    """
    handler = cl.LangchainCallbackHandler()
    result = await cl.make_async(agent)(input_str, callbacks=[handler])
    print(result)
    await cl.Message(content=result["output"]).send()