refactor
main.py CHANGED
@@ -234,13 +234,10 @@ async def coding_assistant(query: QueryModel, background_tasks: BackgroundTasks,
 
 # New functions for news assistant
 
-def internet_search(query, type = "web", num_results=20):
-    logger.info(f"Performing internet search for query: {query}, type: {type}")
-    if type == "web":
-        url = "https://api.search.brave.com/res/v1/web/search"
-    else:
-        url = "https://api.search.brave.com/res/v1/news/search"
-
+def internet_search(query, search_type="web", num_results=20):
+    logger.info(f"Performing internet search for query: {query}, type: {search_type}")
+    url = f"https://api.search.brave.com/res/v1/{'web' if search_type == 'web' else 'news'}/search"
+
     headers = {
         "Accept": "application/json",
         "Accept-Encoding": "gzip",
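The removed if/else is folded into a single f-string that picks the Brave endpoint from search_type. The hunk cuts off before the actual HTTP request, so the following is only a sketch of how the rest of the function plausibly fits together; the X-Subscription-Token header, the BRAVE_API_KEY lookup, and the requests.get parameters are assumptions about the Brave Search API, not lines from this diff:

# Sketch only: everything below the headers dict is assumed, not shown in the hunk.
import os
import requests

BRAVE_API_KEY = os.environ.get("BRAVE_API_KEY", "")  # assumed configuration

def internet_search_sketch(query, search_type="web", num_results=20):
    url = f"https://api.search.brave.com/res/v1/{'web' if search_type == 'web' else 'news'}/search"
    headers = {
        "Accept": "application/json",
        "Accept-Encoding": "gzip",
        "X-Subscription-Token": BRAVE_API_KEY,  # assumed header name for Brave auth
    }
    response = requests.get(url, headers=headers, params={"q": query, "count": num_results})
    if response.status_code != 200:
        return []
    payload = response.json()
    results = payload["web"]["results"] if search_type == "web" else payload["results"]
    return results[:num_results]

Both the old and the new code target the same two Brave endpoints; the refactor only removes the duplicated branch and renames the builtin-shadowing parameter type to search_type.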
@@ -254,73 +251,52 @@ def internet_search(query, type = "web", num_results=20):
         logger.error(f"Failed to fetch search results. Status code: {response.status_code}")
         return []
 
-    if type == "web":
-        search_data = response.json()["web"]["results"]
-    else:
-        search_data = response.json()["results"]
-    processed_results = []
-
-    for item in search_data:
-        if not item.get("extra_snippets"):
-            continue
-
-        result = {
+    search_data = response.json()["web"]["results"] if search_type == "web" else response.json()["results"]
+
+    processed_results = [
+        {
             "title": item["title"],
             "snippet": item["extra_snippets"][0],
             "last_updated": item.get("age", "")
         }
-        processed_results.append(result)
+        for item in search_data
+        if item.get("extra_snippets")
+    ][:num_results]
 
     logger.info(f"Retrieved {len(processed_results)} search results")
-    return processed_results
+    return processed_results
 
 @lru_cache(maxsize=100)
 def cached_internet_search(query: str):
     logger.info(f"Performing cached internet search for query: {query}")
-    return internet_search(query, type="news")
-
+    return internet_search(query, search_type="news")
 
-def analyze_news(query):
-    logger.info(f"Analyzing news for query: {query}")
-    news_data = cached_internet_search(query)
+def analyze_data(query, data_type="news"):
+    logger.info(f"Analyzing {data_type} for query: {query}")
 
-    if not news_data:
-        logger.error("Failed to fetch news data")
-        return None
-
-    # Prepare the prompt for the AI
-    prompt = generate_news_prompt(query, news_data)
-
+    if data_type == "news":
+        data = cached_internet_search(query)
+        prompt_generator = generate_news_prompt
+        system_prompt = NEWS_ASSISTANT_PROMPT
+    else:
+        data = internet_search(query, search_type="web")
+        prompt_generator = generate_search_prompt
+        system_prompt = SEARCH_ASSISTANT_PROMPT
+
+    if not data:
+        logger.error(f"Failed to fetch {data_type} data")
+        return None
 
+    prompt = prompt_generator(query, data)
     messages = [
-        {"role": "system", "content": NEWS_ASSISTANT_PROMPT},
+        {"role": "system", "content": system_prompt},
         {"role": "user", "content": prompt}
     ]
 
-    logger.info("News analysis completed")
+    logger.info(f"{data_type.capitalize()} analysis completed")
     return messages
 
-@app.post("/news-assistant")
-async def news_assistant(query: NewsQueryModel, api_key: str = Depends(verify_api_key)):
-    """
-    News assistant endpoint that provides summaries and analysis of recent news based on user queries.
-    Requires API Key authentication via X-API-Key header.
-    """
-    logger.info(f"Received news assistant query: {query.query}")
-    messages = analyze_news(query.query)
-
-    if not messages:
-        logger.error("Failed to fetch news data")
-        raise HTTPException(status_code=500, detail="Failed to fetch news data")
-
-    def process_response():
-        for content in chat_with_llama_stream(messages, model=query.model_id):
-            yield content
-        logger.info(f"Completed news assistant response for query: {query.query}")
-
-    return StreamingResponse(process_response(), media_type="text/event-stream")
-
-class SearchQueryModel(BaseModel):
+class QueryModel(BaseModel):
     query: str = Field(..., description="Search query")
     model_id: ModelID = Field(
         default="openai/gpt-4o-mini",
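The loop-and-append over search_data becomes a single comprehension that keeps only items carrying extra_snippets and caps the list at num_results, a cap the visible old lines never applied. A self-contained illustration with fabricated Brave-style items:

# Fabricated sample items shaped like Brave results; not data from the diff.
search_data = [
    {"title": "A", "extra_snippets": ["first snippet"], "age": "2 days ago"},
    {"title": "B"},                                         # no extra_snippets: dropped
    {"title": "C", "extra_snippets": ["another snippet"]},  # no age: defaults to ""
]
num_results = 20

processed_results = [
    {
        "title": item["title"],
        "snippet": item["extra_snippets"][0],
        "last_updated": item.get("age", "")
    }
    for item in search_data
    if item.get("extra_snippets")
][:num_results]

print(processed_results)
# [{'title': 'A', 'snippet': 'first snippet', 'last_updated': '2 days ago'},
#  {'title': 'C', 'snippet': 'another snippet', 'last_updated': ''}]

Also visible in this hunk: @lru_cache(maxsize=100) keys on the exact query string, and cached_internet_search is hard-wired to search_type="news", so only the news branch of analyze_data benefits from the cache; the web branch calls internet_search directly every time.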
@@ -334,49 +310,40 @@ class SearchQueryModel(BaseModel):
         }
     }
 
-def analyze_search_results(query):
-    logger.info(f"Analyzing search results for query: {query}")
-    search_data = internet_search(query, type="web")
-
-    if not search_data:
-        logger.error("Failed to fetch search data")
-        return "Failed to fetch search data.", []
-
-    # Prepare the prompt for the AI
-    prompt = generate_search_prompt(query, search_data)
-
-    messages = [
-        {"role": "system", "content": SEARCH_ASSISTANT_PROMPT},
-        {"role": "user", "content": prompt}
-    ]
-
-    logger.info("Search results analysis completed")
-    return messages
-
-@app.post("/search-assistant")
-async def search_assistant(query: SearchQueryModel, api_key: str = Depends(verify_api_key)):
-    """
-    Search assistant endpoint that provides summaries and analysis of web search results based on user queries.
-    Requires API Key authentication via X-API-Key header.
-    """
-    logger.info(f"Received search assistant query: {query.query}")
-    messages = analyze_search_results(query.query)
+def assistant_api(query, data_type):
+    logger.info(f"Received {data_type} assistant query: {query}")
+    messages = analyze_data(query, data_type)
 
     if not messages:
-        logger.error("Failed to fetch search data")
-        raise HTTPException(status_code=500, detail="Failed to fetch search data")
-
+        logger.error(f"Failed to fetch {data_type} data")
+        raise HTTPException(status_code=500, detail=f"Failed to fetch {data_type} data")
+
     def process_response():
         logger.info(f"Generating response using LLM: {messages}")
         full_response = ""
         for content in chat_with_llama_stream(messages, model=query.model_id):
-            full_response+=content
+            full_response += content
             yield content
-        logger.info(f"Completed search assistant response for query: {query.query}")
+        logger.info(f"Completed {data_type} assistant response for query: {query}")
         logger.info(f"LLM Response: {full_response}")
 
     return StreamingResponse(process_response(), media_type="text/event-stream")
 
+@app.post("/news-assistant")
+async def news_assistant(query: QueryModel, api_key: str = Depends(verify_api_key)):
+    """
+    News assistant endpoint that provides summaries and analysis of recent news based on user queries.
+    Requires API Key authentication via X-API-Key header.
+    """
+    return assistant_api(query.query, "news")
+
+@app.post("/search-assistant")
+async def search_assistant(query: QueryModel, api_key: str = Depends(verify_api_key)):
+    """
+    Search assistant endpoint that provides summaries and analysis of web search results based on user queries.
+    Requires API Key authentication via X-API-Key header.
+    """
+    return assistant_api(query.query, "search")
 
 from pydantic import BaseModel, Field
 import yaml
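Both former endpoints now funnel through assistant_api, whose process_response generator streams chunks to the client while accumulating them for the final log line. A stripped-down sketch of that accumulate-while-yield pattern outside FastAPI (fake_stream stands in for chat_with_llama_stream, which is defined elsewhere in main.py):

# Stand-alone version of the process_response() pattern; fake_stream is a stand-in.
def fake_stream():
    yield from ["Breaking", " news", " summary", "."]

def process_response():
    full_response = ""
    for content in fake_stream():
        full_response += content   # accumulate for the post-stream log
        yield content              # forward each chunk as soon as it arrives
    print(f"LLM Response: {full_response}")  # runs only after the stream is exhausted

print("".join(process_response()))  # Breaking news summary.

One side effect of this design, in both the old and new versions: the final LLM Response log entry is emitted only if the client consumes the stream to the end.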
@@ -624,6 +591,7 @@ async def followup_agent(query: FollowupQueryModel, background_tasks: Background
 
     return StreamingResponse(process_response(), media_type="text/event-stream")
 
+
 @app.post("/v3/followup-agent")
 async def followup_agent(query: FollowupQueryModel, background_tasks: BackgroundTasks, api_key: str = Depends(verify_api_key)):
     """
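With /news-assistant and /search-assistant both delegating to assistant_api, the two endpoints are exercised identically. A client-side sketch using requests; the base URL and API key value are placeholders, while the paths, the X-API-Key header, and the query/model_id body fields come from the diff:

# Client sketch for the consolidated endpoints. BASE_URL and API_KEY are placeholders.
import requests

BASE_URL = "http://localhost:8000"   # placeholder
API_KEY = "your-api-key"             # placeholder

def stream_assistant(path, query):
    resp = requests.post(
        f"{BASE_URL}{path}",
        json={"query": query, "model_id": "openai/gpt-4o-mini"},
        headers={"X-API-Key": API_KEY},
        stream=True,
    )
    resp.raise_for_status()
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)

stream_assistant("/news-assistant", "latest developments in AI regulation")
stream_assistant("/search-assistant", "FastAPI streaming best practices")

Since assistant_api is a plain synchronous function that returns a StreamingResponse, the async endpoint wrappers simply pass its return value through.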