import gradio as gr
import whisper
from transformers import pipeline
import pandas as pd

# Load the Whisper speech-to-text model ("base" is a reasonable speed/accuracy trade-off).
whisper_model = whisper.load_model("base")

# Summarization pipeline backed by Pegasus.
summarization = pipeline("summarization", model="google/pegasus-large")


# Transcribe an uploaded audio file, summarize the transcript, and save both to CSV.
def process_audio(audio_file, min_length, max_length):
    try:
        if audio_file is None:
            raise ValueError("No audio file provided.")

        # Transcribe the uploaded audio with Whisper.
        result = whisper_model.transcribe(audio_file)
        text = result['text']

        if not text:
            raise ValueError("Failed to transcribe the audio. The transcription result is empty.")

        # Summarize the transcript. Slider values are cast to int, and inputs longer
        # than the model's maximum length are truncated.
        summary_result = summarization(
            text,
            min_length=int(min_length),
            max_length=int(max_length),
            truncation=True
        )
        summary = summary_result[0]['summary_text']

        if not summary:
            raise ValueError("Failed to summarize the transcript. The summary result is empty.")

        # Save the audio path, transcript, and summary to a CSV file.
        df_results = pd.DataFrame({
            "Audio File": [audio_file],
            "Transcript": [text],
            "Summary": [summary]
        })
        df_results.to_csv("results.csv", index=False)

        return text, summary

    except Exception as e:
        # Show the error in both output boxes instead of crashing the app.
        error_message = f"An error occurred: {str(e)}"
        return error_message, error_message


# Build the Gradio interface: an audio upload and two summary-length sliders in,
# the transcript and summary text boxes out.
iface = gr.Interface(
    fn=process_audio,
    inputs=[
        gr.Audio(sources=["upload"], type="filepath", label="Upload your audio file"),
        gr.Slider(minimum=10, maximum=50, value=30, step=1, label="Minimum Summary Length"),
        gr.Slider(minimum=50, maximum=600, value=100, step=1, label="Maximum Summary Length")
    ],
    outputs=[
        gr.Textbox(label="Transcript"),
        gr.Textbox(label="Summary")
    ],
    title="Audio to Summarized Transcript",
    description="Upload an audio file and adjust the summary length to get both the transcript and a summary."
)

iface.launch()