import gradio as gr
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

model_id = "lyhourt/whisper-small-clean_v6"

# Use the GPU with half precision when available; otherwise fall back to CPU with float32.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Load the fine-tuned Whisper checkpoint and its processor (tokenizer + feature extractor).
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch_dtype, use_safetensors=True
)
model.to(device)

processor = AutoProcessor.from_pretrained(model_id)

# Build the ASR pipeline; long recordings are split into chunks and batched for throughput.
pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    max_new_tokens=128,
    chunk_length_s=120,
    batch_size=16,
    return_timestamps=True,
    torch_dtype=torch_dtype,
    device=device,
)


def transcribe(audio):
    # Gradio passes the upload/recording as a file path; return only the transcript text.
    text = pipe(audio)["text"]
    return text


iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["upload", "microphone"], type="filepath"),
    outputs="text",
    title="Whisper Small Hungarian",
    description="Realtime demo for Hungarian speech recognition using a fine-tuned Whisper small model.",
)

iface.launch()
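
# Optional sanity check (a minimal sketch, not part of the original demo): the
# pipeline can also be called directly on a local audio file, bypassing the UI.
# The filename "sample_hu.wav" below is an illustrative placeholder, not a file
# shipped with this app. Uncomment to print a transcript after the UI is closed.
# print(pipe("sample_hu.wav")["text"])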