Spaces:
Build error
Update app.py
Browse files
app.py
CHANGED
@@ -53,7 +53,7 @@ def process_audio(audio_file, translate=False):
|
|
53 |
"""Process audio file"""
|
54 |
transcriber = ChunkedTranscriber(chunk_size=5, overlap=1)
|
55 |
results = transcriber.transcribe_audio(audio_file, translate=True)
|
56 |
-
return
|
57 |
# try:
|
58 |
# processor = AudioProcessor()
|
59 |
# language_segments, final_segments = processor.process_audio(audio_file, translate)
|
@@ -92,7 +92,7 @@ def summarize_text(results):
|
|
92 |
if summarizer is None:
|
93 |
return "Summarization model could not be loaded."
|
94 |
|
95 |
-
summary = summarizer(''.join(
|
96 |
return summary
|
97 |
except Exception as e:
|
98 |
logger.error(f"Summarization failed: {str(e)}")
|
@@ -108,9 +108,10 @@ def answer_question(context, question):
|
|
108 |
return "Q&A model could not be loaded."
|
109 |
if not question :
|
110 |
return "Please enter your Question"
|
|
|
111 |
messages = [
|
112 |
{"role": "system", "content": "You are a helpful assistant who can answer questions based on the given context."},
|
113 |
-
{"role": "user", "content": f"Context: {''.join(
|
114 |
]
|
115 |
|
116 |
response = qa_pipeline(messages, max_new_tokens=256)[0]['generated_text']
|
|
|
53 |
"""Process audio file"""
|
54 |
transcriber = ChunkedTranscriber(chunk_size=5, overlap=1)
|
55 |
results = transcriber.transcribe_audio(audio_file, translate=True)
|
56 |
+
return results
|
57 |
# try:
|
58 |
# processor = AudioProcessor()
|
59 |
# language_segments, final_segments = processor.process_audio(audio_file, translate)
|
|
|
92 |
if summarizer is None:
|
93 |
return "Summarization model could not be loaded."
|
94 |
|
95 |
+
summary = summarizer(''.join(item['translated'] for item in results if 'translated' in item), max_length=150, min_length=50, do_sample=False)[0]['summary_text']
|
96 |
return summary
|
97 |
except Exception as e:
|
98 |
logger.error(f"Summarization failed: {str(e)}")
|
|
|
108 |
return "Q&A model could not be loaded."
|
109 |
if not question :
|
110 |
return "Please enter your Question"
|
111 |
+
|
112 |
messages = [
|
113 |
{"role": "system", "content": "You are a helpful assistant who can answer questions based on the given context."},
|
114 |
+
{"role": "user", "content": f"Context: {''.join(item['translated'] for item in context if 'translated' in item)}\n\nQuestion: {question}"}
|
115 |
]
|
116 |
|
117 |
response = qa_pipeline(messages, max_new_tokens=256)[0]['generated_text']
|