Update app.py
Browse files
app.py
CHANGED
@@ -91,7 +91,7 @@ def search_arxiv(query):
|
|
91 |
client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
|
92 |
search_query = query
|
93 |
#top_n_results = st.slider(key='topnresults', label="Top n results as context", min_value=4, max_value=100, value=100)
|
94 |
-
search_source = st.sidebar.selectbox(key='searchsource', label="Search Source", ["Semantic Search - up to 10 Mar 2024", "Arxiv Search - Latest - (EXPERIMENTAL)"])
|
95 |
llm_model = st.sidebar.selectbox(key='llmmodel', label="LLM Model", ["mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.2", "google/gemma-7b-it", "None"])
|
96 |
st.sidebar.markdown('### π ' + query)
|
97 |
result = client.predict(
|
|
|
91 |
client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
|
92 |
search_query = query
|
93 |
#top_n_results = st.slider(key='topnresults', label="Top n results as context", min_value=4, max_value=100, value=100)
|
94 |
+
search_source = st.sidebar.selectbox(key='searchsource', label="Search Source", options=["Semantic Search - up to 10 Mar 2024", "Arxiv Search - Latest - (EXPERIMENTAL)"])
|
95 |
llm_model = st.sidebar.selectbox(key='llmmodel', label="LLM Model", options=["mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.2", "google/gemma-7b-it", "None"])
|
96 |
st.sidebar.markdown('### π ' + query)
|
97 |
result = client.predict(
|