OuroborosM committed on
Commit
32f9e57
·
1 Parent(s): 99ba5fb

add openai agent

Browse files
Files changed (1) hide show
  1. app.py +55 -16
app.py CHANGED
@@ -1,7 +1,7 @@
1
  # from typing import Any, Coroutine
2
  import openai
3
  import os
4
- from langchain.vectorstores import Chroma
5
  from langchain.embeddings.openai import OpenAIEmbeddings
6
  from langchain.text_splitter import CharacterTextSplitter
7
  from langchain.chat_models import AzureChatOpenAI
@@ -19,6 +19,14 @@ from langchain.python import PythonREPL
19
  from langchain.chains import LLMMathChain
20
  from langchain.memory import ConversationBufferMemory
21
  from langchain.agents import ZeroShotAgent, AgentExecutor
 
 
 
 
 
 
 
 
22
  from langchain import LLMChain
23
  import azure.cognitiveservices.speech as speechsdk
24
  import requests
@@ -51,6 +59,9 @@ from langchain.docstore.document import Document
51
 
52
 
53
  memory = ConversationBufferMemory(memory_key="chat_history")
 
 
 
54
  # Custom document loaders
55
  class MyElmLoader(UnstructuredEmailLoader):
56
  """Wrapper to fallback to text/plain when default does not work"""
@@ -413,9 +424,15 @@ prompt = ZeroShotAgent.create_prompt(
413
  input_variables=["input", "chat_history", "agent_scratchpad"]
414
  )
415
 
 
 
 
 
 
 
416
  input_variables=["input", "chat_history", "agent_scratchpad"]
417
 
418
- agent2 = initialize_agent(tools, llm,
419
  # agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
420
  agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
421
  verbose = True,
@@ -436,8 +453,14 @@ agent2 = initialize_agent(tools, llm,
436
  )
437
 
438
  llm_chain = LLMChain(llm=llm, prompt=prompt)
 
 
 
439
  agent_core = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
440
- agent = AgentExecutor.from_agent_and_tools(
 
 
 
441
  agent=agent_core,
442
  tools=tools,
443
  verbose=True,
@@ -446,15 +469,28 @@ agent = AgentExecutor.from_agent_and_tools(
446
  max_iterations = int(os.getenv("max_iterations")),
447
  early_stopping_method="generate",
448
  )
449
- agent.max_execution_time = int(os.getenv("max_iterations"))
450
- agent.handle_parsing_errors = True
451
- agent.early_stopping_method = "generate"
 
 
 
 
 
 
 
 
 
 
 
 
 
452
 
453
  print(agent.agent.llm_chain.prompt.template)
454
  # print(agent.agent.llm_chain.prompt)
455
 
456
  global vectordb
457
- vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
458
  global vectordb_p
459
  vectordb_p = Pinecone.from_existing_index(index_name, embeddings)
460
 
@@ -610,15 +646,18 @@ def QAQuery_p(question: str):
610
  source = res['source_documents']
611
  return response, source
612
 
613
- def CreatDb():
614
- global vectordb
615
- loader = DirectoryLoader('./documents', glob='**/*.txt')
616
- documents = loader.load()
617
- text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=200)
618
- split_docs = text_splitter.split_documents(documents)
619
- print(split_docs)
620
- vectordb = Chroma.from_documents(split_docs, embeddings, persist_directory='db')
621
- vectordb.persist()
 
 
 
622
 
623
  def QAQuery(question: str):
624
  global vectordb
 
1
  # from typing import Any, Coroutine
2
  import openai
3
  import os
4
+ # from langchain.vectorstores import Chroma
5
  from langchain.embeddings.openai import OpenAIEmbeddings
6
  from langchain.text_splitter import CharacterTextSplitter
7
  from langchain.chat_models import AzureChatOpenAI
 
19
  from langchain.chains import LLMMathChain
20
  from langchain.memory import ConversationBufferMemory
21
  from langchain.agents import ZeroShotAgent, AgentExecutor
22
+ from langchain.agents import OpenAIMultiFunctionsAgent
23
+ from langchain.prompts import MessagesPlaceholder
24
+ from langchain.schema.messages import (
25
+ AIMessage,
26
+ BaseMessage,
27
+ FunctionMessage,
28
+ SystemMessage,
29
+ )
30
  from langchain import LLMChain
31
  import azure.cognitiveservices.speech as speechsdk
32
  import requests
 
59
 
60
 
61
  memory = ConversationBufferMemory(memory_key="chat_history")
62
+
63
+ memory_openai = ConversationBufferMemory(memory_key="memory", return_messages=True)
64
+
65
  # Custom document loaders
66
  class MyElmLoader(UnstructuredEmailLoader):
67
  """Wrapper to fallback to text/plain when default does not work"""
 
424
  input_variables=["input", "chat_history", "agent_scratchpad"]
425
  )
426
 
427
+ prompt_openai = OpenAIMultiFunctionsAgent.create_prompt(
428
+ system_message = SystemMessage(
429
+ content="You are a helpful AI assistant."),
430
+ extra_prompt_messages = MessagesPlaceholder(variable_name="memory"),
431
+ )
432
+
433
  input_variables=["input", "chat_history", "agent_scratchpad"]
434
 
435
+ agent_ZEROSHOT_REACT = initialize_agent(tools, llm,
436
  # agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
437
  agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
438
  verbose = True,
 
453
  )
454
 
455
  llm_chain = LLMChain(llm=llm, prompt=prompt)
456
+
457
+ llm_chain_openai = LLMChain(llm=llm, prompt=prompt_openai, verbose=True)
458
+
459
  agent_core = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
460
+
461
+ agent_core_openai = OpenAIMultiFunctionsAgent(llm_chain=llm_chain_openai, tools=tools, verbose=True)
462
+
463
+ agent_ZEROSHOT_AGENT = AgentExecutor.from_agent_and_tools(
464
  agent=agent_core,
465
  tools=tools,
466
  verbose=True,
 
469
  max_iterations = int(os.getenv("max_iterations")),
470
  early_stopping_method="generate",
471
  )
472
+
473
+ agent_OPENAI_MULTI = AgentExecutor.from_agent_and_tools(
474
+ agent=agent_core_openai,
475
+ tools=tools,
476
+ verbose=True,
477
+ memory=memory_openai,
478
+ handle_parsing_errors = True,
479
+ max_iterations = int(os.getenv("max_iterations")),
480
+ early_stopping_method="generate",
481
+ )
482
+
483
+ # agent.max_execution_time = int(os.getenv("max_iterations"))
484
+ # agent.handle_parsing_errors = True
485
+ # agent.early_stopping_method = "generate"
486
+
487
+ agent = agent_ZEROSHOT_AGENT
488
 
489
  print(agent.agent.llm_chain.prompt.template)
490
  # print(agent.agent.llm_chain.prompt)
491
 
492
  global vectordb
493
+ # vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
494
  global vectordb_p
495
  vectordb_p = Pinecone.from_existing_index(index_name, embeddings)
496
 
 
646
  source = res['source_documents']
647
  return response, source
648
 
649
+ # def CreatDb():
650
+ # '''
651
+ # Function to create a ChromaDB database from all docs
652
+ # '''
653
+ # global vectordb
654
+ # loader = DirectoryLoader('./documents', glob='**/*.txt')
655
+ # documents = loader.load()
656
+ # text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=200)
657
+ # split_docs = text_splitter.split_documents(documents)
658
+ # print(split_docs)
659
+ # vectordb = Chroma.from_documents(split_docs, embeddings, persist_directory='db')
660
+ # vectordb.persist()
661
 
662
  def QAQuery(question: str):
663
  global vectordb