Kr08 committed on
Commit 9fb4314 · verified · 1 Parent(s): 671b74b

Update app.py

Files changed (1)
  1. app.py +18 -14
app.py CHANGED
@@ -120,8 +120,6 @@ def process_and_summarize(audio_file, translate, model_size, do_summarize=True):
         logger.error(traceback.format_exc())
         raise gr.Error(f"Processing failed: {str(e)}")
 
-
-
 
 @spaces.GPU(duration=60)
 def answer_question(context, question):
@@ -138,25 +136,31 @@ def answer_question(context, question):
 
         outputs = qa_pipeline(messages, max_new_tokens=256)
 
-        # Extract the answer from the output
-        if isinstance(outputs, list) and len(outputs) > 0 and isinstance(outputs[0], dict):
-            for message in outputs[0]:
-                if message.get('role') == 'assistant':
-                    answer = message.get('content', '')
-                    break
+        logger.info(f"Raw model output: {outputs}")
+
+        # Parse the string output into a list of dictionaries
+        try:
+            parsed_output = ast.literal_eval(outputs)
+            if isinstance(parsed_output, list):
+                for item in parsed_output:
+                    if isinstance(item, dict) and item.get('role') == 'assistant':
+                        answer = item.get('content', '')
+                        break
+                else:
+                    answer = "No assistant response found in the model's output."
             else:
-                answer = "No answer found in the model's response."
-        else:
-            answer = str(outputs)  # Fallback to string representation of outputs
-
-        logger.info("Q&A process completed successfully")
+                answer = str(parsed_output)
+        except (ValueError, SyntaxError):
+            # If parsing fails, return the raw output
+            answer = str(outputs)
+
+        logger.info(f"Extracted answer: {answer}")
         return answer
     except Exception as e:
         logger.error(f"Q&A process failed: {str(e)}")
         logger.error(traceback.format_exc())
         return f"Error occurred during Q&A process. Please try again. Error: {str(e)}"
 
-
 # Main interface
 with gr.Blocks() as iface:
     gr.Markdown("# WhisperX Audio Transcription, Translation, Summarization, and Q&A (with ZeroGPU support)")
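
For reference, the new extraction path can be exercised on its own. The sketch below mirrors the logic added in this commit under two assumptions: that qa_pipeline can return a string representation of a chat-style message list (which is why ast.literal_eval is used), and that ast is imported at the top of app.py (the import is outside this hunk). The helper name extract_assistant_answer and the sample_output value are illustrative only; app.py keeps this logic inline in answer_question.

import ast

def extract_assistant_answer(outputs):
    # Mirrors the commit's parsing: outputs may be a string repr of a list of
    # {'role': ..., 'content': ...} dicts; fall back to the raw text otherwise.
    try:
        parsed = ast.literal_eval(outputs)
    except (ValueError, SyntaxError):
        return str(outputs)
    if isinstance(parsed, list):
        for item in parsed:
            if isinstance(item, dict) and item.get('role') == 'assistant':
                return item.get('content', '')
        return "No assistant response found in the model's output."
    return str(parsed)

# Hypothetical stringified chat output, for illustration only.
sample_output = (
    "[{'role': 'user', 'content': 'Summarize the meeting audio.'}, "
    "{'role': 'assistant', 'content': 'The speakers agree on a release date.'}]"
)
print(extract_assistant_answer(sample_output))
# -> The speakers agree on a release date.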