import gradio as gr
import torch
from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration, BartForConditionalGeneration, BartTokenizer
from huggingface_hub import login
import os
import librosa  # assumed here for loading and resampling the uploaded audio file
# Retrieve the token from the environment variable
hf_api_token = os.getenv("HF_API_TOKEN")
if hf_api_token is None:
    raise ValueError("HF_API_TOKEN environment variable is not set")
# Authenticate with Hugging Face
login(token=hf_api_token, add_to_git_credential=True)
# Initialize the Whisper processor and model
whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
# Initialize the summarization model and tokenizer
# Use BART model for summarization
summarization_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
summarization_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
# Function to transcribe audio
def transcribe_audio(audio_file):
    # Load the audio file and resample it to 16 kHz, the rate Whisper expects
    audio_array, _ = librosa.load(audio_file, sr=16000)
    # Convert the waveform to log-mel input features
    input_features = whisper_processor(audio_array, sampling_rate=16000, return_tensors="pt").input_features
    # Generate transcription token ids and decode them to text
    transcription_ids = whisper_model.generate(input_features)
    transcription = whisper_processor.batch_decode(transcription_ids, skip_special_tokens=True)[0]
    return transcription
# Function to summarize text
def summarize_text(text):
    inputs = summarization_tokenizer(text, return_tensors="pt", max_length=1024, truncation=True)
    summary_ids = summarization_model.generate(inputs.input_ids, max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
    summary = summarization_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary
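# The file imports gradio but the snippet shown here ends before the UI is
# defined. The block below is a hypothetical sketch of how the two functions
# above could be wired into a Gradio interface, assuming an audio input
# provided as a file path; names such as transcribe_and_summarize and the
# labels are illustrative, not taken from the original file.
def transcribe_and_summarize(audio_file):
    transcription = transcribe_audio(audio_file)
    summary = summarize_text(transcription)
    return transcription, summary

demo = gr.Interface(
    fn=transcribe_and_summarize,
    inputs=gr.Audio(type="filepath", label="Audio"),
    outputs=[gr.Textbox(label="Transcription"), gr.Textbox(label="Summary")],
    title="Audio Transcription and Summarization",
)

if __name__ == "__main__":
    demo.launch()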