|
import gradio as gr |
|
from transformers import pipeline |
|
import os |
|
|
|
|
|
# Load the speech-recognition pipeline backed by a Whisper base model
# fine-tuned for Lebanese Arabizi (downloads weights on first run).
pipe = pipeline(
    model="geokanaan/Whisper_Base_Lebanese_Arabizi",
    task="automatic-speech-recognition",
)
|
|
|
|
|
|
|
def transcribe(audio, actual_transcription):
    """Transcribe a recorded audio file to Lebanese Arabizi text.

    Parameters
    ----------
    audio : str | None
        Path to the recorded audio file (the Audio component uses
        ``type="filepath"``). ``None`` when the user submits without
        recording anything.
    actual_transcription : str
        Ground-truth text typed by the user. Unused by the model, but
        kept in the signature so it is stored alongside flagged samples.

    Returns
    -------
    str
        The model's transcription, or ``""`` when no audio was provided.
    """
    # Guard: submitting with an empty microphone component passes None,
    # which would crash the pipeline call.
    if audio is None:
        return ""
    result = pipe(audio)
    return result["text"]
|
|
|
|
|
# Hugging Face token with write access, provided via the "WRITE"
# environment variable (e.g. a Space secret).
HF_TOKEN = os.environ.get('WRITE')

# Persist flagged (audio, transcription) samples to a HF dataset repo
# for later review and model improvement.
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "flagged_Audio_Lebanese")
|
|
|
# Build the demo UI: a microphone recording plus an optional reference
# transcription box; manually flagged samples go to the HF dataset above.
iface = gr.Interface(

    fn=transcribe,

    inputs=[

        # type="filepath" hands transcribe() a path on disk, not raw samples.
        gr.Audio(source="microphone", type="filepath"),

        # NOTE(review): removed `optional=True` — it is not a valid Textbox
        # kwarg in the Gradio 3.x API this file uses, and text inputs may be
        # left empty by default anyway.
        gr.Textbox(label="Actual Transcription")

    ],

    outputs="text",

    title="arabeasy",

    description="Realtime demo for Lebanese Arabizi speech recognition",

    allow_flagging='manual',

    flagging_callback=hf_writer

)

# share=True exposes a public gradio.live URL for the demo.
iface.launch(share=True)