Update main.py
main.py CHANGED
@@ -744,7 +744,7 @@ async def digi_followup_agent_v2(query: FollowupQueryModel, background_tasks: Ba
         full_response = ""
         for content in chat_with_llama_stream(limited_conversation, model=query.model_id):
             full_response += content
-            yield json.dumps({"
+            yield json.dumps({"type": "response","content": content}) + "\n"
 
         logger.info(f"LLM RAW response for query: {query.query}: {full_response}")
         response_content, interact,tools = parse_followup_and_tools(full_response)
@@ -753,7 +753,7 @@ async def digi_followup_agent_v2(query: FollowupQueryModel, background_tasks: Ba
             "response": response_content,
             "clarification": interact
         }
-        yield json.dumps({"
+        yield json.dumps({"type": "interact","content": result}) +"\n"
 
         # Add the assistant's response to the conversation history
         conversations[query.conversation_id].append({"role": "assistant", "content": full_response})
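
For context, the two added yields turn the endpoint into a newline-delimited JSON stream: per-token "response" chunks while the model is generating, followed by a single "interact" chunk carrying the parsed result. Below is a minimal client-side sketch of how such a stream could be consumed. The endpoint URL, request payload fields, and helper name are assumptions for illustration; only the {"type": ..., "content": ...} chunk shape comes from the diff above.

# Hedged sketch: consuming the newline-delimited JSON stream yielded by the
# updated generator. URL and payload fields are assumed, not taken from main.py.
import json
import requests

def stream_followup(query: str, conversation_id: str, model_id: str,
                    url: str = "http://localhost:8000/digi_followup_agent_v2"):
    # Payload field names are a guess based on FollowupQueryModel attributes
    # referenced in the diff (query, conversation_id, model_id).
    payload = {"query": query, "conversation_id": conversation_id, "model_id": model_id}
    with requests.post(url, json=payload, stream=True) as resp:
        resp.raise_for_status()
        # Each yielded chunk ends with "\n", so one line equals one JSON object.
        for line in resp.iter_lines(decode_unicode=True):
            if not line:
                continue
            chunk = json.loads(line)
            if chunk.get("type") == "response":
                # Incremental model output, printed as it arrives.
                print(chunk["content"], end="", flush=True)
            elif chunk.get("type") == "interact":
                # Final structured payload with the parsed response/clarification.
                print("\nInteract payload:", chunk["content"])

if __name__ == "__main__":
    stream_followup("What changed?", "demo-conversation", "example-model")

Streaming the raw "response" chunks lets a UI render tokens as they arrive, while the trailing "interact" chunk delivers the fully parsed result once generation has finished.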