alemarino2025 committed on
Commit
4c730de
·
verified ·
1 Parent(s): 068f53c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -16
app.py CHANGED
@@ -158,6 +158,22 @@ class TimeoutException(Exception):
158
  def alarm_handler(signum, frame):
159
  raise TimeoutException("Timed out!")
160
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
161
 
162
  # Initialize conversation history
163
  conversation_history = []
@@ -225,22 +241,6 @@ def predict(user_input, timeout_seconds=1800): # 30 minutes = 1800 seconds
225
  # While the prediction is made, log both the inputs and outputs to a local log file
226
  # while writing to the log file, ensure that the commit scheduler is locked to avoid parallel
227
  # access
228
-
229
- json_data = {
230
- "user_input": user_input,
231
- "retrieved_context": context_for_query,
232
- "model_response": prediction
233
- }
234
-
235
- with scheduler.lock:
236
- with log_file.open("a") as f:
237
- try:
238
- #json.dump(json_data, f)
239
- f.write(json.dumps(json_data))
240
- f.write("\n")
241
- except TypeError as e:
242
- logger.error(f"Failed to serialize JSON: {str(e)}")
243
- raise
244
 
245
  return prediction
246
 
@@ -256,5 +256,7 @@ demo = gr.Interface(
256
  concurrency_limit=16
257
  )
258
 
 
 
259
  demo.queue()
260
  demo.launch(share=True)
 
158
  def alarm_handler(signum, frame):
159
  raise TimeoutException("Timed out!")
160
 
161
+ # Define the function that will be called when the user submits their feedback (to be called in Gradio)
162
+ def save_feedback(user_input:str, context_for_query: str, prediction:str) -> None:
163
+ """
164
+ Append input/outputs and user feedback to a JSON Lines file using a thread lock to avoid concurrent writes from different users.
165
+ """
166
+ with scheduler.lock:
167
+ with log_file.open("a") as f:
168
+ f.write(json.dumps({"user_input": user_input,"retrieved_context": context_for_query,"model_response": prediction}))
169
+ f.write("\n")
170
+
171
+ # json_data = {
172
+ # "user_input": user_input,
173
+ # "retrieved_context": context_for_query,
174
+ # "model_response": prediction
175
+ # }
176
+
177
 
178
  # Initialize conversation history
179
  conversation_history = []
 
241
  # While the prediction is made, log both the inputs and outputs to a local log file
242
  # while writing to the log file, ensure that the commit scheduler is locked to avoid parallel
243
  # access
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
244
 
245
  return prediction
246
 
 
256
  concurrency_limit=16
257
  )
258
 
259
+ save_feedback()
260
+
261
  demo.queue()
262
  demo.launch(share=True)