# Lab2Youtube / app.py
# (Hugging Face Space page header artifact — author: Victorlopo21,
#  commit b446731, "Rename whisper_youtube.py to app.py")
# -*- coding: utf-8 -*-
"""whisper_youtube.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1spmA-7Un5TA6ahuCeO62BUS_ME6zPuUx
# Using gradio for making a nice UI.
Youtube link version.
Installing requirements.
"""
#!pip install gradio
#!pip install git+https://github.com/huggingface/transformers
#!pip install pytube
from pytube import YouTube
from transformers import pipeline
import gradio as gr
import os
"""## Building a Demo
Now that we've fine-tuned our model we can build a demo to show
off its ASR capabilities! We'll make use of 🤗 Transformers
`pipeline`, which will take care of the entire ASR pipeline,
right from pre-processing the audio inputs to decoding the
model predictions.
Running the example below will generate a Gradio demo where we can input audio to
our fine-tuned Whisper model to transcribe the corresponding text:
"""
# ASR pipeline backed by a fine-tuned Whisper-medium checkpoint for Galician.
# Loaded once at module import so every transcription request reuses the model.
pipe = pipeline(model="Victorlopo21/whisper-medium-gl-30") # change to "your-username/the-name-you-picked"
def get_audio(url):
    """Download the audio track of a YouTube video and return its local path.

    Parameters
    ----------
    url : str
        Full YouTube video URL.

    Returns
    -------
    str
        Path to the downloaded audio file, renamed with a ``.wav`` extension.

    Raises
    ------
    ValueError
        If the video exposes no audio-only stream.
    """
    yt = YouTube(url)
    # Pick the best audio-only stream explicitly. The previous code indexed
    # [1], which raises IndexError whenever fewer than two audio streams
    # exist and otherwise skipped the first stream for no clear reason.
    stream = yt.streams.filter(only_audio=True).order_by("abr").desc().first()
    if stream is None:
        raise ValueError("No audio-only stream available for this video.")
    out_file = stream.download(output_path=".")
    base, _ext = os.path.splitext(out_file)
    new_file = base + '.wav'
    # NOTE: this only changes the extension; the actual container format is
    # unchanged. That is fine here because the transformers pipeline decodes
    # via ffmpeg, which sniffs the real format from the file contents.
    if os.path.exists(new_file):
        # os.rename raises FileExistsError on Windows if the target exists
        # (e.g. on a re-run with the same video).
        os.remove(new_file)
    os.rename(out_file, new_file)
    return new_file
def transcribe_url(url):
    """Download a YouTube video's audio and return its Whisper transcription."""
    audio_path = get_audio(url)
    result = pipe(audio_path)
    return result['text']
# Build the Gradio UI: a single text box in (YouTube URL) and a single
# text box out (the transcription), then start the app.
demo_title = "Whisper Medium Galician"
demo_description = (
    "Realtime demo for Galician speech recognition of a YouTube video "
    "using a fine-tuned Whisper medium model."
)
iface = gr.Interface(
    fn=transcribe_url,
    inputs="text",
    outputs="text",
    title=demo_title,
    description=demo_description,
)
iface.launch(debug=True)
# Short youtube video to hear
# https://www.youtube.com/watch?v=Z2SjeZJZi6s&ab_channel=rimc7
# TO TRY