Update app.py
app.py CHANGED
@@ -324,6 +324,7 @@ def search_glossary(query):
     # 🕵️‍♀️ Searching the glossary for: query
     all_results = ""
     #st.markdown(f"- {query}")
+
 
     # 🔍 ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM
     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
@@ -334,9 +335,11 @@ def search_glossary(query):
         stream_outputs=True,
         api_name="/ask_llm"
     )
+    st.markdown("# Mixtral-8x7B-Instruct-v0.1")
     st.markdown(result)
-
     st.code(result, language="python", line_numbers=True)
+
+
     # 🔍 ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM - api_name: /ask_llm
     result2 = client.predict(
         prompt=query,
@@ -344,9 +347,11 @@ def search_glossary(query):
         stream_outputs=True,
         api_name="/ask_llm"
     )
+    st.markdown("# Mistral-7B-Instruct-v0.2")
     st.markdown(result2)
-
     st.code(result2, language="python", line_numbers=True)
+
+
     # 🔍 ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM - api_name: /ask_llm
     result3 = client.predict(
         prompt=query,
@@ -354,9 +359,11 @@ def search_glossary(query):
         stream_outputs=True,
         api_name="/ask_llm"
     )
+    st.markdown("# Gemma-7b-it")
     st.markdown(result3)
-
     st.code(result3, language="python", line_numbers=True)
+
+
     # 🔍 ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM - api_name: /update_with_rag_md
     response2 = client.predict(
         message=query, # str in 'parameter_13' Textbox component
@@ -365,9 +372,11 @@ def search_glossary(query):
         llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
         api_name="/update_with_rag_md"
     )
+    st.markdown("# Mistral-7B-Instruct-v0.2 update_with_rag_md 0")
     st.markdown(response2[0])
     st.code(response2[0], language="python", line_numbers=True, wrap_lines=True)
 
+    st.markdown("# Mistral-7B-Instruct-v0.2 update_with_rag_md 1")
     st.markdown(response2[1])
     st.code(response2[1], language="python", line_numbers=True, wrap_lines=True)
 
@@ -386,15 +395,15 @@ def search_glossary(query):
     except:
         st.markdown('2 error')
     try:
-        filename = generate_filename(
-        create_file(filename, query,
-        st.session_state.chat_history.append({"assistant": query, "ArXiV":
+        filename = generate_filename(response2[0], "md")
+        create_file(filename, query, response2[0])
+        st.session_state.chat_history.append({"assistant": query, "ArXiV": response2[0]})
     except:
         st.markdown('3 error')
     try:
-        filename = generate_filename(response2, "md")
-        create_file(filename, query, response2)
-        st.session_state.chat_history.append({"assistant": query, "ArXiV": response2})
+        filename = generate_filename(response2[1], "md")
+        create_file(filename, query, response2[1])
+        st.session_state.chat_history.append({"assistant": query, "ArXiV": response2[1]})
     except:
         st.markdown('4 error')
 
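For context, the code being annotated drives a remote Space through gradio_client. Below is a minimal standalone sketch of that call pattern, not the app's exact code: it assumes the awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern Space still exposes the /ask_llm and /update_with_rag_md endpoints shown in the diff, and that /ask_llm also takes llm_model_picked (the diff elides its middle arguments).

# Sketch of the remote-call pattern used in this commit.
from gradio_client import Client

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")

# /ask_llm returns a single string; llm_model_picked here is an
# assumption, mirroring the /update_with_rag_md call in the diff.
answer = client.predict(
    prompt="What is retrieval-augmented generation?",
    llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
    stream_outputs=True,
    api_name="/ask_llm",
)
print(answer)

# /update_with_rag_md returns two values (the diff indexes response2[0]
# and response2[1]); the names below are illustrative, not the API's.
answer_md, references_md = client.predict(
    message="What is retrieval-augmented generation?",
    llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
    api_name="/update_with_rag_md",
)
print(answer_md)
print(references_md)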
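The commit repeats one display idiom for each model (a header naming the model, the rendered Markdown, then an st.code dump of the same text). If that idiom were factored out, a sketch could look like the following; the helper name is mine, not the app's.

import streamlit as st

def show_labeled_result(model_name: str, text: str) -> None:
    # Same idiom as the diff: header, rendered Markdown, copyable dump.
    st.markdown(f"# {model_name}")
    st.markdown(text)
    st.code(text, language="python", line_numbers=True)

show_labeled_result("Mixtral-8x7B-Instruct-v0.1",
                    "RAG retrieves relevant papers, then generates an answer.")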
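generate_filename and create_file are app-local helpers defined elsewhere in app.py; this commit only changes what is passed to them (response2 becomes response2[0] and response2[1]). Hypothetical stand-ins, to make the save path concrete; the real helpers may differ:

import re
from datetime import datetime

def generate_filename(text: str, ext: str) -> str:
    # Assumed behavior: derive a filesystem-safe name from the response
    # text plus a timestamp.
    slug = re.sub(r"[^A-Za-z0-9]+", "-", str(text)[:50]).strip("-").lower()
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    return f"{stamp}-{slug}.{ext}"

def create_file(filename: str, query: str, response: str) -> None:
    # Assumed behavior: persist the query and response together as Markdown,
    # so each saved file is self-contained.
    with open(filename, "w", encoding="utf-8") as f:
        f.write(f"# Query\n\n{query}\n\n# Response\n\n{response}\n")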