import os

import numpy as np
import gradio as gr
from transformers import pipeline

# Whisper model fine-tuned for Lebanese Arabizi speech recognition
transcriber = pipeline(task="automatic-speech-recognition", model="geokanaan/Whisper_Base_Lebanese_Arabizi")

# Hugging Face write token (read from the WRITE environment variable) and
# dataset saver used to persist flagged audio samples
HF_TOKEN = os.getenv('WRITE')
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "flagged_Audio_Lebanese")


def transcribe(audio):
    # Gradio passes microphone input as a (sample_rate, numpy_array) tuple
    sr, y = audio

    # Convert stereo to mono by averaging the channels
    if y.ndim > 1:
        y = y.mean(axis=1)

    # Normalize to float32 in [-1, 1], guarding against silent (all-zero) input
    y = y.astype(np.float32)
    peak = np.max(np.abs(y))
    if peak > 0:
        y /= peak

    return transcriber({"sampling_rate": sr, "raw": y})["text"]


# Realtime demo: record from the microphone and transcribe to text.
# Manually flagged samples are sent to the Hugging Face dataset via hf_writer.
demo = gr.Interface(
    transcribe,
    gr.Audio(sources=["microphone"]),
    "text",
    title="Arabeasy",
    description="Realtime demo for Lebanese Arabizi speech recognition",
    allow_flagging='manual',
    flagging_callback=hf_writer,
)

demo.launch()