Commit: DEBUG: llModel
functions.py (+2 -2)
@@ -208,7 +208,7 @@ def trimMessages(chain_input):
         return True


-def answerQuery(query: str, vectorstore: str, llmModel: str = "llama3-70b-8192") -> str:
+def answerQuery(query: str, vectorstore: str, llmModel: str = "llama-3.1-70b-versatile") -> str:
     global prompt
     global client
     global sources
@@ -242,7 +242,7 @@ def answerQuery(query: str, vectorstore: str, llmModel: str = "llama3-70b-8192")
         history_messages_key="chatHistory"
     )
     chain = RunnablePassthrough.assign(messages_trimmed=trimMessages) | messageChain
-    followUpChain = followUpPrompt | ChatGroq(model_name="llama3-70b-8192", temperature=0) | jsonParser
+    followUpChain = followUpPrompt | ChatGroq(model_name="llama-3.1-70b-versatile", temperature=0) | jsonParser
     output = chain.invoke(
         {"question": query},
         {"configurable": {"session_id": vectorStoreName}}
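
The net effect of the commit is a single model swap applied in two places: the Groq model id "llama3-70b-8192" is replaced by "llama-3.1-70b-versatile", once in answerQuery's default llmModel parameter and once in the hard-coded followUpChain. A minimal sketch of that prompt | model | parser pattern follows, assuming the public langchain_groq and langchain_core APIs; the GROQ_MODEL constant, the prompt text, and the example question are illustrative, not taken from the Space's code.

# A minimal sketch, assuming langchain_groq.ChatGroq and the
# langchain_core prompt/parser APIs. GROQ_MODEL, the prompt text,
# and the example invocation are illustrative, not the Space's code.
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq

# Keeping the model id in one constant would turn this two-line
# diff into a one-line change the next time a model is retired.
GROQ_MODEL = "llama-3.1-70b-versatile"

# Placeholder prompt standing in for the Space's followUpPrompt.
followUpPrompt = ChatPromptTemplate.from_messages([
    ("system", "Propose three follow-up questions as a JSON list of strings."),
    ("human", "{question}"),
])

# Same shape as the changed line: prompt | Groq chat model | JSON parser.
followUpChain = (
    followUpPrompt
    | ChatGroq(model_name=GROQ_MODEL, temperature=0)
    | JsonOutputParser()
)

# Example call (constructing ChatGroq requires GROQ_API_KEY in the environment):
# followUpChain.invoke({"question": "How are documents retrieved from the vectorstore?"})

Pinning the id in a single constant also keeps the default in answerQuery and the model inside followUpChain from drifting apart, which is exactly the two-place edit this diff had to make by hand.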