eagle0504 committed
Commit bc24339 · 1 Parent(s): 64e1f3e

button added

Files changed (1)
  1. app.py +89 -90
app.py CHANGED
@@ -115,100 +115,99 @@ if submit_button:
 
 
 # React to user input
-if submit_button:
-    if prompt := st.chat_input(initial_input):
-        with st.spinner("Loading, please be patient with us ... 🙏"):
-            # Display user message in chat message container
-            st.chat_message("user").markdown(prompt)
-            # Add user message to chat history
-            st.session_state.messages.append({"role": "user", "content": prompt})
-
-            question = prompt
-            begin_t = time.time()
-            results = collection.query(query_texts=question, n_results=5)
-            end_t = time.time()
-            st.success(f"Query answer. | Time: {end_t - begin_t} sec")
-            idx = results["ids"][0]
-            idx = [int(i) for i in idx]
-            ref = pd.DataFrame(
-                {
-                    "idx": idx,
-                    "questions": [dataset["train"]["questions"][i] for i in idx],
-                    "answers": [dataset["train"]["answers"][i] for i in idx],
-                    "distances": results["distances"][0],
-                }
             )
-            # special_threshold = st.sidebar.slider('How old are you?', 0, 0.6, 0.1)  # 0.3
-            filtered_ref = ref[ref["distances"] < special_threshold]
-            if filtered_ref.shape[0] > 0:
-                st.success("There is highly relevant information in our database.")
-                ref_from_db_search = filtered_ref["answers"].str.cat(sep=" ")
-                final_ref = filtered_ref
-            else:
-                st.warning(
-                    "The database may not have relevant information to help your question, so please be aware of hallucinations."
-                )
-                ref_from_db_search = ref["answers"].str.cat(sep=" ")
-                final_ref = ref
-
-            if option == "YSA":
-                try:
-                    begin_t = time.time()
-                    llm_response = llama2_7b_ysa(question)
-                    end_t = time.time()
-                    st.success(f"Running LLM. | Time: {end_t - begin_t} sec")
-                except:
-                    st.warning("Sorry, the inference endpoint is temporarily down. 😔")
-                    llm_response = "NA."
-            else:
-                st.warning(
-                    "Apologies! We are in the process of fine-tuning the model, so it's currently unavailable. ⚙️"
-                )
-                llm_response = "NA"
-
-            finetuned_llm_guess = ["from_llm", question, llm_response, 0]
-            final_ref.loc[-1] = finetuned_llm_guess
-            final_ref = final_ref.reset_index()
-
-            # add AI judge as additional rating
-            if run_ai_judge == "Yes":
-                independent_ai_judge_score = []
-                begin_t = time.time()
-                for i in range(final_ref.shape[0]):
-                    this_content = final_ref["answers"][i]
-                    if len(this_content) > 3:
-                        arr1 = openai_text_embedding(question)
-                        arr2 = openai_text_embedding(this_content)
-                        # this_score = calculate_sts_openai_score(question, this_content)
-                        this_score = quantized_influence(arr1, arr2)
-                    else:
-                        this_score = 0
-                    independent_ai_judge_score.append(this_score)
-
-                final_ref["ai_judge"] = independent_ai_judge_score
-
                 end_t = time.time()
-                st.success(f"Using AI Judge. | Time: {end_t - begin_t} sec")
-
-            engineered_prompt = f"""
-            Based on the context: {ref_from_db_search}
-
-            answer the user question: {question}
-
-            Answer the question directly (don't say "based on the context, ...")
-            """
-
             begin_t = time.time()
-            answer = call_chatgpt(engineered_prompt)
             end_t = time.time()
-            st.success(f"Final API Call. | Time: {end_t - begin_t} sec")
-            response = answer
-
-            # Display assistant response in chat message container
-            with st.chat_message("assistant"):
-                with st.spinner("Wait for it..."):
-                    st.markdown(response)
-                with st.expander("See reference:"):
-                    st.table(final_ref)
-            # Add assistant response to chat history
-            st.session_state.messages.append({"role": "assistant", "content": response})
 
 
 # React to user input
+if prompt := st.chat_input(initial_input):
+    with st.spinner("Loading, please be patient with us ... 🙏"):
+        # Display user message in chat message container
+        st.chat_message("user").markdown(prompt)
+        # Add user message to chat history
+        st.session_state.messages.append({"role": "user", "content": prompt})
+
+        question = prompt
+        begin_t = time.time()
+        results = collection.query(query_texts=question, n_results=5)
+        end_t = time.time()
+        st.success(f"Query answer. | Time: {end_t - begin_t} sec")
+        idx = results["ids"][0]
+        idx = [int(i) for i in idx]
+        ref = pd.DataFrame(
+            {
+                "idx": idx,
+                "questions": [dataset["train"]["questions"][i] for i in idx],
+                "answers": [dataset["train"]["answers"][i] for i in idx],
+                "distances": results["distances"][0],
+            }
+        )
+        # special_threshold = st.sidebar.slider('How old are you?', 0, 0.6, 0.1)  # 0.3
+        filtered_ref = ref[ref["distances"] < special_threshold]
+        if filtered_ref.shape[0] > 0:
+            st.success("There is highly relevant information in our database.")
+            ref_from_db_search = filtered_ref["answers"].str.cat(sep=" ")
+            final_ref = filtered_ref
+        else:
+            st.warning(
+                "The database may not have relevant information to help your question, so please be aware of hallucinations."
             )
+            ref_from_db_search = ref["answers"].str.cat(sep=" ")
+            final_ref = ref
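
Note: `collection.query` is Chroma's nearest-neighbor search; it returns parallel lists (one per input query) under keys such as `"ids"` and `"distances"`, which is why the code indexes `[0]` before building the DataFrame. A minimal offline sketch of the same shaping-and-thresholding step, with a mocked query result and an assumed threshold value standing in for the app's sidebar widget:

```python
import pandas as pd

# Mocked Chroma-style result: query() returns one list per input query,
# so results["ids"][0] / results["distances"][0] belong to the single question.
results = {
    "ids": [["3", "7", "1"]],
    "distances": [[0.12, 0.28, 0.55]],
}
# Stand-in for the dataset["train"]["answers"] lookups.
answers_by_idx = {1: "Call the front desk.", 3: "Intake opens at 9am.", 7: "Beds are assigned daily."}

idx = [int(i) for i in results["ids"][0]]
ref = pd.DataFrame(
    {
        "idx": idx,
        "answers": [answers_by_idx[i] for i in idx],
        "distances": results["distances"][0],
    }
)

# Smaller distance = closer match, so the filter keeps only strong hits.
special_threshold = 0.3  # assumed value; the app reads this from a sidebar widget
filtered_ref = ref[ref["distances"] < special_threshold]
print(filtered_ref)  # keeps idx 3 and 7; idx 1 (0.55) is judged too distant
```
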
+
+        if option == "YSA":
+            try:
+                begin_t = time.time()
+                llm_response = llama2_7b_ysa(question)
                 end_t = time.time()
+                st.success(f"Running LLM. | Time: {end_t - begin_t} sec")
+            except:
+                st.warning("Sorry, the inference endpoint is temporarily down. 😔")
+                llm_response = "NA."
+        else:
+            st.warning(
+                "Apologies! We are in the process of fine-tuning the model, so it's currently unavailable. ⚙️"
+            )
+            llm_response = "NA"
+
+        finetuned_llm_guess = ["from_llm", question, llm_response, 0]
+        final_ref.loc[-1] = finetuned_llm_guess
+        final_ref = final_ref.reset_index()
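
Note: assigning to `final_ref.loc[-1]` appends a single row labeled -1, and the four list entries line up with the frame's four columns, so the fine-tuned model's own guess travels alongside the retrieved rows (with `"from_llm"` as a sentinel idx and 0 as a placeholder distance). A small self-contained sketch of that append-then-reset pattern, using made-up rows:

```python
import pandas as pd

# Two made-up retrieved rows with the same columns the app builds.
final_ref = pd.DataFrame(
    {
        "idx": [3, 7],
        "questions": ["q3", "q7"],
        "answers": ["a3", "a7"],
        "distances": [0.12, 0.28],
    }
)

# Assigning to label -1 appends one row; the four values map onto the four columns.
final_ref.loc[-1] = ["from_llm", "user question", "llm answer", 0]

# reset_index() moves the labels (0, 1, -1) into an "index" column and
# renumbers the rows 0..n-1, keeping positional loops over shape[0] valid.
final_ref = final_ref.reset_index()
print(final_ref)
```
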
+
+        # add AI judge as additional rating
+        if run_ai_judge == "Yes":
+            independent_ai_judge_score = []
             begin_t = time.time()
+            for i in range(final_ref.shape[0]):
+                this_content = final_ref["answers"][i]
+                if len(this_content) > 3:
+                    arr1 = openai_text_embedding(question)
+                    arr2 = openai_text_embedding(this_content)
+                    # this_score = calculate_sts_openai_score(question, this_content)
+                    this_score = quantized_influence(arr1, arr2)
+                else:
+                    this_score = 0
+                independent_ai_judge_score.append(this_score)
+
+            final_ref["ai_judge"] = independent_ai_judge_score
+
             end_t = time.time()
+            st.success(f"Using AI Judge. | Time: {end_t - begin_t} sec")
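
Note: `openai_text_embedding` and `quantized_influence` are helpers defined elsewhere in this repo, so their exact behavior isn't visible in this diff. The sketch below reproduces the judge loop's shape under assumed stand-ins: a toy deterministic embedding and plain cosine similarity as the score.

```python
import numpy as np

def fake_embedding(text):
    """Toy deterministic embedding so this sketch runs offline; the app
    calls its own openai_text_embedding() (an OpenAI embeddings wrapper)."""
    rng = np.random.default_rng(sum(map(ord, text)))
    return rng.standard_normal(16)

def cosine_similarity(arr1, arr2):
    """Stand-in scorer; the app uses its own quantized_influence() helper."""
    a, b = np.asarray(arr1, dtype=float), np.asarray(arr2, dtype=float)
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

question = "When does intake open?"
answers = ["Intake opens at 9am every day.", "NA"]  # "NA" mimics a placeholder row

independent_ai_judge_score = []
for this_content in answers:
    if len(this_content) > 3:  # same guard as the app: skip short placeholders
        arr1 = fake_embedding(question)
        arr2 = fake_embedding(this_content)
        independent_ai_judge_score.append(cosine_similarity(arr1, arr2))
    else:
        independent_ai_judge_score.append(0)

print(independent_ai_judge_score)  # one score per row, stored as the "ai_judge" column
```
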
+
+        engineered_prompt = f"""
+        Based on the context: {ref_from_db_search}
+
+        answer the user question: {question}
+
+        Answer the question directly (don't say "based on the context, ...")
+        """
+
+        begin_t = time.time()
+        answer = call_chatgpt(engineered_prompt)
+        end_t = time.time()
+        st.success(f"Final API Call. | Time: {end_t - begin_t} sec")
+        response = answer
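
Note: `call_chatgpt` is also defined elsewhere in app.py. One plausible implementation, assuming the standard `openai` Python client and an `OPENAI_API_KEY` in the environment (the model name is likewise an assumption):

```python
from openai import OpenAI  # assumes the standard OpenAI client and an OPENAI_API_KEY env var

client = OpenAI()

def call_chatgpt(prompt: str) -> str:
    """One plausible implementation of the app's call_chatgpt() wrapper:
    a single-turn chat completion that returns just the reply text."""
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumed model; the real wrapper may use another
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

# The same f-string template as above, with stand-in values.
ref_from_db_search = "Intake opens at 9am. Beds are assigned daily."
question = "When can I check in?"
engineered_prompt = f"""
Based on the context: {ref_from_db_search}

answer the user question: {question}

Answer the question directly (don't say "based on the context, ...")
"""
print(call_chatgpt(engineered_prompt))
```
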
+
+        # Display assistant response in chat message container
+        with st.chat_message("assistant"):
+            with st.spinner("Wait for it..."):
+                st.markdown(response)
+            with st.expander("See reference:"):
+                st.table(final_ref)
+        # Add assistant response to chat history
+        st.session_state.messages.append({"role": "assistant", "content": response})
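
Note: appending to `st.session_state.messages` only persists the turn; the conventional Streamlit counterpart (presumably earlier in app.py, above this hunk) replays the stored history on every rerun:

```python
import streamlit as st

# st.session_state survives Streamlit's script reruns, so initializing the
# list once and replaying it each run is what keeps the chat transcript visible.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Re-render every stored turn before handling new input.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
```
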