sabazo committed on
Commit
68c2be0
2 Parent(s): 5d55367 8fdc6bf

Merge pull request #2 from almutareb/1_resolve_LC_warnings

Browse files
app/api/v1/agents/hf_mixtral_agent.py CHANGED
@@ -1,5 +1,5 @@
1
  # HF libraries
2
- from langchain_community.llms import HuggingFaceEndpoint
3
  from langchain.agents import AgentExecutor
4
  from langchain.agents.format_scratchpad import format_log_to_str
5
  from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
@@ -21,7 +21,7 @@ from app.templates.react_json_with_memory import template_system
21
  from app.utils import logger
22
  from app.utils import utils
23
  from langchain.globals import set_llm_cache
24
- from langchain.cache import SQLiteCache
25
  from app.utils.callback import (
26
  CustomAsyncCallbackHandler,
27
  CustomFinalStreamingStdOutCallbackHandler,
 
1
  # HF libraries
2
+ from langchain_huggingface import HuggingFaceEndpoint
3
  from langchain.agents import AgentExecutor
4
  from langchain.agents.format_scratchpad import format_log_to_str
5
  from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
 
21
  from app.utils import logger
22
  from app.utils import utils
23
  from langchain.globals import set_llm_cache
24
+ from langchain_community.cache import SQLiteCache
25
  from app.utils.callback import (
26
  CustomAsyncCallbackHandler,
27
  CustomFinalStreamingStdOutCallbackHandler,
app/structured_tools/structured_tools.py CHANGED
@@ -4,7 +4,7 @@ from langchain_community.retrievers import ArxivRetriever
4
  from langchain_community.tools import WikipediaQueryRun
5
  from langchain_community.utilities import WikipediaAPIWrapper
6
  #from langchain.tools import Tool
7
- from langchain_community.utilities import GoogleSearchAPIWrapper
8
  from langchain_community.embeddings.sentence_transformer import (
9
  SentenceTransformerEmbeddings,
10
  )
@@ -52,11 +52,11 @@ def memory_search(query:str) -> str:
52
  collection_name = settings.CONVERSATION_COLLECTION_NAME
53
  #store using envar
54
 
55
- # embedding_function = SentenceTransformerEmbeddings(
56
- # model_name=settings.EMBEDDING_MODEL
57
- # #model_name=os.getenv("EMBEDDING_MODEL"),
58
- # )
59
- embedding_function = GPT4AllEmbeddings()
60
 
61
  vector_db = Chroma(
62
  client=client, # client for Chroma
@@ -80,11 +80,11 @@ def knowledgeBase_search(query:str) -> str:
80
  collection_name="ArxivPapers"
81
  #store using envar
82
 
83
- # embedding_function = SentenceTransformerEmbeddings(
84
- # #model_name=os.getenv("EMBEDDING_MODEL"),
85
- # model_name=settings.EMBEDDING_MODEL
86
- # )
87
- embedding_function = GPT4AllEmbeddings()
88
 
89
  vector_db = Chroma(
90
  client=client, # client for Chroma
 
4
  from langchain_community.tools import WikipediaQueryRun
5
  from langchain_community.utilities import WikipediaAPIWrapper
6
  #from langchain.tools import Tool
7
+ from langchain_google_community import GoogleSearchAPIWrapper
8
  from langchain_community.embeddings.sentence_transformer import (
9
  SentenceTransformerEmbeddings,
10
  )
 
52
  collection_name = settings.CONVERSATION_COLLECTION_NAME
53
  #store using envar
54
 
55
+ embedding_function = SentenceTransformerEmbeddings(
56
+ model_name=settings.EMBEDDING_MODEL
57
+ #model_name=os.getenv("EMBEDDING_MODEL"),
58
+ )
59
+ #embedding_function = GPT4AllEmbeddings()
60
 
61
  vector_db = Chroma(
62
  client=client, # client for Chroma
 
80
  collection_name="ArxivPapers"
81
  #store using envar
82
 
83
+ embedding_function = SentenceTransformerEmbeddings(
84
+ #model_name=os.getenv("EMBEDDING_MODEL"),
85
+ model_name=settings.EMBEDDING_MODEL
86
+ )
87
+ #embedding_function = GPT4AllEmbeddings()
88
 
89
  vector_db = Chroma(
90
  client=client, # client for Chroma
requirements.txt CHANGED
@@ -1,5 +1,7 @@
1
  langchain-community
2
  langchain
 
 
3
  google-search-results
4
  langchainhub
5
  arxiv
 
1
  langchain-community
2
  langchain
3
+ langchain-google-community
4
+ langchain-huggingface
5
  google-search-results
6
  langchainhub
7
  arxiv