Spaces:
Runtime error
Runtime error
File size: 1,667 Bytes
64db4c7 d1c226d 64db4c7 c256705 64db4c7 28bbb3d 64db4c7 a1c7d96 14f970c 28bbb3d b63a826 14f970c c256705 a1c7d96 c256705 64db4c7 a1c7d96 ff97a84 64db4c7 a1c7d96 28bbb3d e5149ce c256705 64db4c7 28bbb3d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
import gradio as gr
import torch
from transformers import pipeline
import numpy as np
import time
# Three Whisper-based ASR pipelines, loaded once at startup so that each
# request only pays inference cost, not model-download/initialization cost.
pipe_vol_g = pipeline(task="automatic-speech-recognition", model="IABDs8a/whisper-equipo3-g")
pipe_small = pipeline(task="automatic-speech-recognition", model="aitor-medrano/whisper-small-lara")
pipe_hombres = pipeline(task="automatic-speech-recognition", model="IABDs8a/lara-hombres-equipo-3")
def _transcribe(pipe, prefijo, sr, y):
    """Run one ASR pipeline on (sr, y) audio; return (labelled text, elapsed seconds)."""
    t0 = time.time()
    texto = prefijo + pipe({"sampling_rate": sr, "raw": y})["text"]
    return texto, time.time() - t0

def greet(grabacion):
    """Transcribe a Gradio recording with three Whisper models.

    Parameters
    ----------
    grabacion : tuple[int, np.ndarray]
        ``(sample_rate, samples)`` as produced by ``gr.Audio()``.

    Returns
    -------
    tuple
        (text_g, time_g, text_small, time_small, text_hombres,
        time_hombres, total_time) — same order as the UI outputs.
    """
    inicio = time.time()
    sr, y = grabacion
    # The pipelines expect float32 samples.
    y = y.astype(np.float32)
    # gr.Audio can deliver stereo as a (samples, channels) array; the ASR
    # pipelines want mono, so downmix. TODO confirm channel axis with gradio.
    if y.ndim > 1:
        y = y.mean(axis=1)
    # Peak-normalize to [-1, 1]; guard against division by zero on silence.
    pico = np.max(np.abs(y))
    if pico > 0:
        y /= pico
    result_g, t_g = _transcribe(pipe_vol_g, "g:", sr, y)
    result_small, t_small = _transcribe(pipe_small, "small:", sr, y)
    result_hombres, t_hombres = _transcribe(pipe_hombres, "hombres:", sr, y)
    fin = time.time()
    return result_g, t_g, result_small, t_small, result_hombres, t_hombres, fin - inicio
# Gradio UI: one audio input; for each of the three models a transcript plus
# its per-model timing, and finally the total elapsed time.
salidas = [
    gr.Text(label="Salida (Voluntaria G)"),
    gr.Number(label="Tiempo (Voluntaria G)"),
    gr.Text(label="Salida (Small)"),
    gr.Number(label="Tiempo (Small)"),
    gr.Text(label="Salida (Hombres)"),
    gr.Number(label="Tiempo (Hombres)"),
    gr.Number(label="Tiempo total"),
]
demo = gr.Interface(fn=greet, inputs=[gr.Audio()], outputs=salidas)
demo.launch()
|