JPBianchi committed on
Commit
0a29650
1 Parent(s): 2f7a3b7

hybrid search hides all toggles

Browse files
Files changed (1) hide show
  1. app.py +17 -17
app.py CHANGED
@@ -263,25 +263,25 @@ def main():
263
  st.text("Running OFFLINE")
264
  st.write("----------")
265
 
266
- hubrid_search = st.toggle('Hybrid Search', True)
267
- if hubrid_search:
268
  alpha_input = st.slider(label='Alpha',min_value=0.00, max_value=1.00, value=0.40, step=0.05)
269
  retrieval_limit = st.slider(label='Hybrid Search Results', min_value=10, max_value=300, value=10, step=10)
270
 
271
- hybrid_filter = st.toggle('Filter Search using Guest name', True) # i.e. look only at guests' data
272
-
273
- rerank = st.toggle('Rerank', True)
274
- if rerank:
275
- reranker_topk = st.slider(label='Reranker Top K',min_value=1, max_value=5, value=3, step=1)
276
- else:
277
- # needed to not fill the LLM with too many responses (> context size)
278
- # we could make it dependent on the model
279
- reranker_topk = 3
280
-
281
- rag_it = st.toggle('RAG it', True)
282
- if rag_it:
283
- st.write(f"Using LLM '{model_nameGPT}'")
284
- llm_temperature = st.slider(label='LLM T˚', min_value=0.0, max_value=2.0, value=0.01, step=0.10 )
285
 
286
  model_name_or_path = st.selectbox(label='Model Name:', options=available_models,
287
  index=available_models.index(model_default),
@@ -383,7 +383,7 @@ def main():
383
 
384
  st.write("\u21D0 Open the sidebar to change Search settings \n ") # https://home.unicode.org also 21E0, 21B0 B2 D0
385
 
386
- if not hubrid_search:
387
  st.stop()
388
 
389
  col1, _ = st.columns([3,7])
 
263
  st.text("Running OFFLINE")
264
  st.write("----------")
265
 
266
+ hybrid_search = st.toggle('Hybrid Search', True)
267
+ if hybrid_search:
268
  alpha_input = st.slider(label='Alpha',min_value=0.00, max_value=1.00, value=0.40, step=0.05)
269
  retrieval_limit = st.slider(label='Hybrid Search Results', min_value=10, max_value=300, value=10, step=10)
270
 
271
+ hybrid_filter = st.toggle('Filter Search using Guest name', True) # i.e. look only at guests' data
272
+
273
+ rerank = st.toggle('Rerank', True)
274
+ if rerank:
275
+ reranker_topk = st.slider(label='Reranker Top K',min_value=1, max_value=5, value=3, step=1)
276
+ else:
277
+ # needed to not fill the LLM with too many responses (> context size)
278
+ # we could make it dependent on the model
279
+ reranker_topk = 3
280
+
281
+ rag_it = st.toggle('RAG it', True)
282
+ if rag_it:
283
+ st.write(f"Using LLM '{model_nameGPT}'")
284
+ llm_temperature = st.slider(label='LLM T˚', min_value=0.0, max_value=2.0, value=0.01, step=0.10 )
285
 
286
  model_name_or_path = st.selectbox(label='Model Name:', options=available_models,
287
  index=available_models.index(model_default),
 
383
 
384
  st.write("\u21D0 Open the sidebar to change Search settings \n ") # https://home.unicode.org also 21E0, 21B0 B2 D0
385
 
386
+ if not hybrid_search:
387
  st.stop()
388
 
389
  col1, _ = st.columns([3,7])