Kr08 committed on
Commit
28d3de2
·
verified ·
1 Parent(s): bd2dd07

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -19
app.py CHANGED
@@ -6,7 +6,6 @@ import torch
6
  import logging
7
  import traceback
8
  import sys
9
- import ast
10
 
11
  logging.basicConfig(
12
  level=logging.INFO,
@@ -122,6 +121,7 @@ def process_and_summarize(audio_file, translate, model_size, do_summarize=True):
122
  raise gr.Error(f"Processing failed: {str(e)}")
123
 
124
 
 
125
  @spaces.GPU(duration=60)
126
  def answer_question(context, question):
127
  logger.info("Starting Q&A process")
@@ -135,25 +135,19 @@ def answer_question(context, question):
135
  {"role": "user", "content": f"Context: {context}\n\nQuestion: {question}"},
136
  ]
137
 
138
- outputs = qa_pipeline(messages, max_new_tokens=256)
139
 
140
- logger.info(f"Raw model output: {outputs}")
141
-
142
- # Parse the string output into a list of dictionaries
143
- try:
144
- parsed_output = ast.literal_eval(outputs)
145
- if isinstance(parsed_output, list):
146
- for item in parsed_output:
147
- if isinstance(item, dict) and item.get('role') == 'assistant':
148
- answer = item.get('content', '')
149
- break
150
- else:
151
- answer = "No assistant response found in the model's output."
152
- else:
153
- answer = str(parsed_output)
154
- except (ValueError, SyntaxError):
155
- # If parsing fails, return the raw output
156
- answer = str(outputs)
157
 
158
  logger.info(f"Extracted answer: {answer}")
159
  return answer
@@ -162,6 +156,7 @@ def answer_question(context, question):
162
  logger.error(traceback.format_exc())
163
  return f"Error occurred during Q&A process. Please try again. Error: {str(e)}"
164
 
 
165
  # Main interface
166
  with gr.Blocks() as iface:
167
  gr.Markdown("# WhisperX Audio Transcription, Translation, Summarization, and Q&A (with ZeroGPU support)")
 
6
  import logging
7
  import traceback
8
  import sys
 
9
 
10
  logging.basicConfig(
11
  level=logging.INFO,
 
121
  raise gr.Error(f"Processing failed: {str(e)}")
122
 
123
 
124
+
125
  @spaces.GPU(duration=60)
126
  def answer_question(context, question):
127
  logger.info("Starting Q&A process")
 
135
  {"role": "user", "content": f"Context: {context}\n\nQuestion: {question}"},
136
  ]
137
 
138
+ out = qa_pipeline(messages, max_new_tokens=256)
139
 
140
+ logger.info(f"Raw model output: {out}")
141
+
142
+ generated_text = out[0]['generated_text']
143
+
144
+ # Find the assistant's message
145
+ for message in generated_text:
146
+ if message['role'] == 'assistant':
147
+ answer = message['content']
148
+ break
149
+ else:
150
+ answer = "No assistant response found in the model's output."
 
 
 
 
 
 
151
 
152
  logger.info(f"Extracted answer: {answer}")
153
  return answer
 
156
  logger.error(traceback.format_exc())
157
  return f"Error occurred during Q&A process. Please try again. Error: {str(e)}"
158
 
159
+
160
  # Main interface
161
  with gr.Blocks() as iface:
162
  gr.Markdown("# WhisperX Audio Transcription, Translation, Summarization, and Q&A (with ZeroGPU support)")