awacke1 committed on
Commit
b6b716b
·
verified ·
1 Parent(s): 008db5b

Update backup11.app.py

Files changed (1)
  1. backup11.app.py +36 -40
backup11.app.py CHANGED
@@ -54,7 +54,7 @@ LOCAL_APP_URL = "https://huggingface.co/spaces/awacke1/AzureCosmosDBUI"
 CosmosDBUrl = 'https://portal.azure.com/#@AaronCWackergmail.onmicrosoft.com/resource/subscriptions/003fba60-5b3f-48f4-ab36-3ed11bc40816/resourceGroups/datasets/providers/Microsoft.DocumentDB/databaseAccounts/acae-afd/dataExplorer'
 
 # 🤖 Anthropic configuration - Teaching machines to be more human (and funnier)
-client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
+anthropicclient = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
 
 # 🧠 Initialize session state - Because even apps need a good memory
 if "chat_history" not in st.session_state:
@@ -311,22 +311,33 @@ def archive_current_container(database_name, container_name, client):
     except Exception as e:
         return f"An error occurred while archiving data: {str(e)} 😢"
 
+def gen_AI_IO_filename(display_query, output):
+    # Get current time in Central Time Zone with milliseconds
+    now_central = datetime.now(pytz.timezone("America/Chicago"))
+    timestamp = now_central.strftime("%Y-%m-%d-%I-%M-%S-%f-%p")
+
+    # Limit components to prevent excessive filename length
+    display_query = display_query[:50]  # Truncate display_query to 50 chars
+    output_snippet = re.sub(r'[^A-Za-z0-9]+', '_', output[:100])  # Truncate output_snippet to 100 chars
+
+    filename = f"{timestamp} - {display_query} - {output_snippet}.md"
+    return filename
 
 # 🔍 Search glossary - Finding needles in digital haystacks
 def search_glossary(query):
     st.markdown(f"### 🔍 SearchGlossary for: {query}")
-    # Dropdown for model selection
-    model_options = ['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None']
-    #model_choice = st.selectbox('🧠 Select LLM Model', options=model_options, index=1)
-    # Dropdown for database selection
+    model_options = ['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2']
+    model_choice = st.selectbox('🧠 Select LLM Model', options=model_options, index=1, key=f"model_choice_{id(query)}")
     database_options = ['Semantic Search', 'Arxiv Search - Latest - (EXPERIMENTAL)']
-    #database_choice = st.selectbox('📚 Select Database', options=database_options, index=0)
+    database_choice = st.selectbox('📚 Select Database', options=database_options, index=0, key=f"database_choice_{id(query)}")
+
     # 🕵️‍♂️ Searching the glossary for: query
     all_results = ""
-    #st.markdown(f"- {query}")
-
+    # Limit the query display to 80 characters
+    display_query = query[:80] + "..." if len(query) > 80 else query
+    st.markdown(f"🕵️‍♂️ Running ArXiV AI Analysis with Query: {display_query} - ML model: {model_choice} and Option: {database_options}")
 
-    # 🔍 ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM
+    # 🔍 ArXiV RAG researcher expert ~-<>-~ Paper Summary & Ask LLM
     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
     # 🔍 ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM - api_name: /ask_llm
     result = client.predict(
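Two things change inside `search_glossary` in this hunk. First, the model and database dropdowns go from commented-out code to live `st.selectbox` widgets, each with an explicit `key`: Streamlit rejects two identically-parameterized widgets created in one run, and `id(query)` makes each key unique per query object (keying on the query string itself would be stabler across reruns, since `id()` values can be recycled). Second, a status line now echoes the truncated query; note it interpolates `database_options` (the whole list) rather than the selected `database_choice`. A short sketch of the keyed-widget pattern using a string-based key — an alternative shape, not what the commit does:

```python
import streamlit as st

def pick_model(query):
    # An explicit key lets the same selectbox be rendered once per distinct
    # query without tripping Streamlit's duplicate-widget check.
    return st.selectbox(
        '🧠 Select LLM Model',
        options=['mistralai/Mixtral-8x7B-Instruct-v0.1',
                 'mistralai/Mistral-7B-Instruct-v0.2'],
        index=1,
        key=f"model_choice_{query}",  # string key: stable across reruns
    )
```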
@@ -351,11 +362,6 @@ def search_glossary(query):
     st.markdown(result2)
     #st.code(result2, language="python", line_numbers=True)
 
-
-
-
-
-
     # 🔍 ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM - api_name: /update_with_rag_md
     response2 = client.predict(
         message=query,  # str in 'parameter_13' Textbox component
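This hunk only deletes stray blank lines, but its context shows the second half of the Gradio flow: the same `Client` is called once at `/ask_llm` for a direct answer and once at `/update_with_rag_md` for RAG-grounded markdown, which comes back as a pair (`response2[0]` and `response2[1]` are saved separately below). A hedged sketch of that two-endpoint flow — the diff elides every argument except `message`, so the call shapes here are assumptions:

```python
from gradio_client import Client

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")

# Direct LLM answer; the real endpoint may take more arguments than shown.
result = client.predict(message="mixture of experts", api_name="/ask_llm")

# RAG pass; per the surrounding code it returns two values,
# unpacked later as response2[0] and response2[1].
response2 = client.predict(message="mixture of experts", api_name="/update_with_rag_md")
```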
@@ -373,33 +379,23 @@ def search_glossary(query):
     #st.code(response2[1], language="python", line_numbers=True, wrap_lines=True)
 
 
-    # Persist AI Results to Markdown Files
-    try:
-        filename = generate_filename(result, "md")
-        create_file(filename, query, result)
-        #st.session_state.chat_history.append({"assistant": query, "ArXiV": result})
-    except:
-        st.markdown('1 error')
-    try:
-        filename = generate_filename(result2, "md")
-        create_file(filename, query, result2)
-        #st.session_state.chat_history.append({"assistant": query, "ArXiV": result2})
-    except:
-        st.markdown('2 error')
-    try:
-        filename = generate_filename(response2[0], "md")
-        create_file(filename, query, response2[0])
-        #st.session_state.chat_history.append({"assistant": query, "ArXiV": response2[0]})
-    except:
-        st.markdown('3 error')
-    try:
-        filename = generate_filename(response2[1], "md")
-        create_file(filename, query, response2[1])
-        #st.session_state.chat_history.append({"assistant": query, "ArXiV": response2[1]})
-    except:
-        st.markdown('4 error')
+    # ✅ Persist AI Results to Markdown Files
+    filename = gen_AI_IO_filename(display_query, result)
+    create_file(filename, query, result)
+    st.markdown(f"✅ File saved as: `{filename}`")
+
+    filename = gen_AI_IO_filename(display_query, result2)
+    create_file(filename, query, result2)
+    st.markdown(f"✅ File saved as: `{filename}`")
+
+    filename = gen_AI_IO_filename(display_query, response2[0])
+    create_file(filename, query, response2[0])
+    st.markdown(f"✅ File saved as: `{filename}`")
+
+    filename = gen_AI_IO_filename(display_query, response2[1])
+    create_file(filename, query, response2[1])
+    st.markdown(f"✅ File saved as: `{filename}`")
 
-
     return result, result2, response2
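Net effect of the final hunk: four numbered `try/except` blocks, which swallowed every exception and reported only `'1 error'` through `'4 error'`, become four unconditional save-and-confirm steps built on the new `gen_AI_IO_filename` helper, so a real failure now surfaces as a traceback instead of being masked. The repetition could be folded into a loop; a sketch of that equivalent form, assuming `create_file(filename, prompt, content)` keeps the signature used throughout the diff:

```python
# Hypothetical tightening of the committed pattern: same behavior, one loop.
for output in (result, result2, response2[0], response2[1]):
    filename = gen_AI_IO_filename(display_query, output)  # timestamped, sanitized name
    create_file(filename, query, output)                  # same helper the diff calls
    st.markdown(f"✅ File saved as: `{filename}`")

# gen_AI_IO_filename produces names like (values illustrative):
# "2024-05-01-03-22-41-123456-PM - mixture of experts - Mixtral_8x7B_routes_tokens.md"
```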