Ilyas KHIAT committed on
Commit 5e72909 · 1 Parent(s): b4ea9f9
Files changed (2):
  1. main.py +1 -0
  2. rag.py +2 -3
main.py CHANGED
@@ -133,6 +133,7 @@ async def generate(user_input: UserInput):
     except Exception as e:
         return {"message": str(e)}
 
+
 @app.post("/whatif")
 async def generate_whatif(whatif_input: WhatifInput):
     try:
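For orientation, a minimal sketch (not the repository's actual implementation) of how the /whatif handler shown above might forward the request into rag.py's generate_whatif_stream. The WhatifInput field names and the StreamingResponse wrapper are assumptions; the diff only exposes the decorator, the signature, and the exception handler.

```python
# Hypothetical sketch of the /whatif endpoint around the diff context above.
# WhatifInput field names and StreamingResponse usage are assumptions.
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

from rag import generate_whatif_stream

app = FastAPI()


class WhatifInput(BaseModel):
    question: str  # assumed field name
    response: str  # assumed field name


@app.post("/whatif")
async def generate_whatif(whatif_input: WhatifInput):
    try:
        # stream=True returns a lazy iterator of chunks from llm_chain.stream(...)
        chunks = generate_whatif_stream(whatif_input.question, whatif_input.response, stream=True)
        # Assumes the chain yields text chunks (e.g. it ends in a string output parser)
        return StreamingResponse(chunks, media_type="text/plain")
    except Exception as e:
        return {"message": str(e)}
```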
rag.py CHANGED
@@ -11,7 +11,6 @@ import random
 from itext2kg.models import KnowledgeGraph
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 
-
 import faiss
 from langchain_community.docstore.in_memory import InMemoryDocstore
 
@@ -27,7 +26,7 @@ import unicodedata
 load_dotenv()
 index_name = os.environ.get("INDEX_NAME")
 # Global initialization
-embedding_model = "text-embedding-3-large"
+embedding_model = "text-embedding-3-small"
 
 embedding = OpenAIEmbeddings(model=embedding_model)
 # vector_store = PineconeVectorStore(index=index_name, embedding=embedding)
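This hunk swaps the global embedding model from text-embedding-3-large (3072-dimensional vectors) to text-embedding-3-small (1536-dimensional), so any FAISS index built with the old model has to be rebuilt with matching dimensions. A minimal sketch of that wiring, assuming the usual LangChain FAISS constructor; import paths beyond those visible in the diff are assumptions.

```python
# Illustrative wiring, not the repository's exact code: an in-memory FAISS store
# backed by the new, smaller embedding model. text-embedding-3-small emits
# 1536-dim vectors (vs 3072 for -large), so the index dimension must match.
import faiss
from langchain_openai import OpenAIEmbeddings           # assumed import path
from langchain_community.vectorstores import FAISS       # assumed import path
from langchain_community.docstore.in_memory import InMemoryDocstore

embedding_model = "text-embedding-3-small"
embedding = OpenAIEmbeddings(model=embedding_model)

dim = len(embedding.embed_query("dimension probe"))  # 1536 for -small
index = faiss.IndexFlatL2(dim)
vector_store = FAISS(
    embedding_function=embedding,
    index=index,
    docstore=InMemoryDocstore(),
    index_to_docstore_id={},
)
```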
@@ -183,7 +182,7 @@ def generate_whatif_stream(question:str,response:str, stream:bool = True) -> str
     context = retrieve_context_from_vectorestore(f"{question} {response}")
     print(f"Context: {context}")
 
-    if generate_stream:
+    if stream:
         return llm_chain.stream({"question":question,"response":response,"context":context})
     else:
         return llm_chain.invoke({"question":question,"response":response,"context":context})
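The last hunk is a bug fix: the branch tested generate_stream, which is not the function's parameter, so stream=False could never select the non-streaming path (and the call would fail with a NameError if no generate_stream existed in scope). With the corrected stream flag, a hedged usage sketch; the question/response strings below are made up.

```python
# Hypothetical caller of the corrected helper; the exact chunk type depends on
# how llm_chain is composed (message chunks vs. plain strings).
from rag import generate_whatif_stream

question = "What if the budget were doubled?"   # illustrative input
response = "The current plan assumes ..."       # illustrative input

# Streaming path: iterate chunks as llm_chain.stream(...) produces them.
for chunk in generate_whatif_stream(question, response, stream=True):
    print(chunk, end="", flush=True)

# Non-streaming path: now reachable, returns the full result from llm_chain.invoke(...).
full_answer = generate_whatif_stream(question, response, stream=False)
print(full_answer)
```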