JPBianchi committed on
Commit
bbf4302
1 Parent(s): 8ee27bb

More cleaning

Browse files
Files changed (1) hide show
  1. app.py +3 -15
app.py CHANGED
@@ -88,6 +88,7 @@ data_path = './data/impact_theory_data.json'
88
  cache_path = 'data/impact_theory_cache.parquet'
89
  data = load_data(data_path)
90
  cache = None # load_content_cache(cache_path)
 
91
 
92
  if 'secrets' in st.secrets:
93
  # st.write("Loading secrets from [secrets] section")
@@ -109,15 +110,6 @@ else :
109
  hf_token = st.secrets['LLAMA2_ENDPOINT_HF_TOKEN_chris']
110
  hf_endpoint = st.secrets['LLAMA2_ENDPOINT_UPLIMIT']
111
 
112
- # else:
113
- # # if we want to use env file
114
- # st.write("Loading secrets from environment variables")
115
- # api_key = os.environ['WEAVIATE_API_KEY']
116
- # url = os.environ['WEAVIATE_ENDPOINT']
117
- # openai_api_key = os.environ['OPENAI_API_KEY']
118
-
119
- # hf_token = os.environ['LLAMA2_ENDPOINT_HF_TOKEN_chris']
120
- # hf_endpoint = os.environ['LLAMA2_ENDPOINT_UPLIMIT']
121
  #%%
122
  # model_default = 'sentence-transformers/all-mpnet-base-v2'
123
  model_default = 'models/finetuned-all-mpnet-base-v2-300' if we_are_not_online \
@@ -155,7 +147,7 @@ def download_model(model_name_or_path, model_local_path):
155
 
156
  #%%
157
  # for streamlit online, we must download the model from google drive
158
- # because github LFS doesn't work on forked repos
159
  def check_model(model_name_or_path):
160
 
161
  model_name = model_name_or_path.split('/')[-1] # remove 'sentence-transformers'
@@ -186,9 +178,6 @@ def get_weaviate_client(api_key, url, model_name_or_path, openai_api_key):
186
 
187
 
188
  ##############
189
- # data = load_data(data_path)
190
- # guests list for sidebar
191
- guest_list = sorted(list(set([d['guest'] for d in data])))
192
 
193
  def main():
194
 
@@ -379,7 +368,6 @@ def main():
379
  with col2:
380
  # let's add a nice pulse bar while generating the response
381
  with hc.HyLoader('', hc.Loaders.pulse_bars, primary_color= 'red', height=50): #"#0e404d" for image green
382
- # with st.spinner('Generating Response...'):
383
 
384
  with col1:
385
 
@@ -434,7 +422,7 @@ def main():
434
  verbose=True)
435
  # st.write(f"Number of results: {len(valid_response)}")
436
 
437
- # I jump out of col1 to get all page width, so need to retest query
438
  if query:
439
 
440
  # creates container for LLM response to position it above search results
 
88
  cache_path = 'data/impact_theory_cache.parquet'
89
  data = load_data(data_path)
90
  cache = None # load_content_cache(cache_path)
91
+ guest_list = sorted(list(set([d['guest'] for d in data])))
92
 
93
  if 'secrets' in st.secrets:
94
  # st.write("Loading secrets from [secrets] section")
 
110
  hf_token = st.secrets['LLAMA2_ENDPOINT_HF_TOKEN_chris']
111
  hf_endpoint = st.secrets['LLAMA2_ENDPOINT_UPLIMIT']
112
 
 
 
 
 
 
 
 
 
 
113
  #%%
114
  # model_default = 'sentence-transformers/all-mpnet-base-v2'
115
  model_default = 'models/finetuned-all-mpnet-base-v2-300' if we_are_not_online \
 
147
 
148
  #%%
149
  # for streamlit online, we must download the model from google drive
150
+
151
  def check_model(model_name_or_path):
152
 
153
  model_name = model_name_or_path.split('/')[-1] # remove 'sentence-transformers'
 
178
 
179
 
180
  ##############
 
 
 
181
 
182
  def main():
183
 
 
368
  with col2:
369
  # let's add a nice pulse bar while generating the response
370
  with hc.HyLoader('', hc.Loaders.pulse_bars, primary_color= 'red', height=50): #"#0e404d" for image green
 
371
 
372
  with col1:
373
 
 
422
  verbose=True)
423
  # st.write(f"Number of results: {len(valid_response)}")
424
 
425
+ # I jumped out of col1 to get all page width, so need to retest query
426
  if query:
427
 
428
  # creates container for LLM response to position it above search results