pvanand committed on
Commit
60ea137
·
verified ·
1 Parent(s): 2d34a18

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +7 -7
main.py CHANGED
@@ -105,11 +105,11 @@ def chat_with_llama_stream(messages, model="gpt-3.5-turbo", max_llm_history=4, m
105
  )
106
 
107
  full_response = ""
108
- for chunk in response:
109
- if chunk.choices[0].delta.content is not None:
110
- content = chunk.choices[0].delta.content
111
- full_response += content
112
- yield content
113
 
114
  # After streaming, add the full response to the conversation history
115
  messages.append({"role": "assistant", "content": full_response})
@@ -260,9 +260,9 @@ async def news_assistant(query: NewsQueryModel, api_key: str = Depends(verify_ap
260
  raise HTTPException(status_code=500, detail="Failed to fetch news data")
261
 
262
  def process_response():
263
- for content in chat_with_llama_stream(messages, model="google/gemini-pro-1.5"):
264
  yield content
265
-
266
  return StreamingResponse(process_response(), media_type="text/event-stream")
267
 
268
  if __name__ == "__main__":
 
105
  )
106
 
107
  full_response = ""
108
+ for event in openai_stream:
109
+ if "content" in event["choices"][0].delta:
110
+ current_response = event["choices"][0].delta.content
111
+ full_response +=current_response
112
+ yield current_response
113
 
114
  # After streaming, add the full response to the conversation history
115
  messages.append({"role": "assistant", "content": full_response})
 
260
  raise HTTPException(status_code=500, detail="Failed to fetch news data")
261
 
262
  def process_response():
263
+ for content in chat_with_llama_stream(messages, model="meta-llama/llama-3-70b-instruct"):
264
  yield content
265
+ #meta-llama/llama-3-70b-instruct google/gemini-pro-1.5
266
  return StreamingResponse(process_response(), media_type="text/event-stream")
267
 
268
  if __name__ == "__main__":