pvanand committed
Commit 113e85f
1 Parent(s): b66bfbf

Update main.py

Files changed (1)
  1. main.py +50 -0
main.py CHANGED
@@ -287,6 +287,56 @@ async def news_assistant(query: NewsQueryModel, api_key: str = Depends(verify_ap
    logger.debug("Starting to stream news assistant response")
    return StreamingResponse(process_response(), media_type="text/event-stream")

+class SearchQueryModel(BaseModel):
+    query: str = Field(..., description="Search query")
+    model_id: ModelID = Field(
+        default="meta-llama/llama-3-70b-instruct",
+        description="ID of the model to use for response generation"
+    )
+    class Config:
+        schema_extra = {
+            "example": {
+                "query": "What are the latest advancements in quantum computing?",
+                "model_id": "meta-llama/llama-3-70b-instruct"
+            }
+        }
+
+def analyze_search_results(query):
+    search_data = internet_search(query, type="web")
+
+    if not search_data:
+        logger.error("Failed to fetch search data")
+        return None
+
+    # Prepare the prompt for the AI
+    prompt = generate_search_prompt(query, search_data)
+
+    messages = [
+        {"role": "system", "content": SEARCH_ASSISTANT_PROMPT},
+        {"role": "user", "content": prompt}
+    ]
+
+    return messages
+
+@app.post("/search-assistant")
+async def search_assistant(query: SearchQueryModel, api_key: str = Depends(verify_api_key)):
+    """
+    Search assistant endpoint that provides summaries and analysis of web search results based on user queries.
+    Requires API Key authentication via X-API-Key header.
+    """
+    messages = analyze_search_results(query.query)
+
+    if not messages:
+        raise HTTPException(status_code=500, detail="Failed to fetch search data")
+
+    def process_response():
+        for content in chat_with_llama_stream(messages, model=query.model_id):
+            yield content
+
+    logger.debug("Starting to stream search assistant response")
+    return StreamingResponse(process_response(), media_type="text/event-stream")
+
+
if __name__ == "__main__":
    import uvicorn
    logger.info("Starting uvicorn server")
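
For reference, below is a minimal client sketch for the new /search-assistant endpoint added in this commit. It assumes the app is served locally at http://localhost:8000 and that "your-api-key" stands in for a key accepted by verify_api_key; neither value is specified in the diff. Because the endpoint returns a text/event-stream response, the client reads and prints the body incrementally.

import requests

# Hypothetical base URL and API key; adjust to the actual deployment.
BASE_URL = "http://localhost:8000"
API_KEY = "your-api-key"

payload = {
    "query": "What are the latest advancements in quantum computing?",
    "model_id": "meta-llama/llama-3-70b-instruct",
}

with requests.post(
    f"{BASE_URL}/search-assistant",
    json=payload,
    headers={"X-API-Key": API_KEY},
    stream=True,  # the endpoint streams its answer as text/event-stream
) as response:
    response.raise_for_status()
    # Print chunks as they arrive rather than waiting for the full body.
    for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
        if chunk:
            print(chunk, end="", flush=True)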