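"""Gradio app for audio transcription with Whisper and speaker diarization
with WhisperX.

Both tabs share one pipeline: normalize the uploaded audio, separate the
vocal track from the background, transcribe the vocals, then export the
result (text/SRT/CSV/JSON).
"""
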
import datetime
import json
from pathlib import Path

import gradio as gr
from whisperx.utils import get_writer

from transcription import fast_transcription, doWhisperX
from audio import normalizeAudio, separateVoiceInstrumental, mp3_to_wav
from helpers import guardar_en_archivo, guardar_dataframe_en_csv

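
# Shared preprocessing for both tabs: loudness normalization, then
# vocal/instrumental separation (separateVoiceInstrumental presumably wraps a
# source-separation model), then conversion of both stems back to WAV.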
def transcribe(audiofile, model):
    audio_path = audiofile[0].name

    audio_normalized_path = normalizeAudio(audio_path, ".wav")
    novocal_path, vocal_path = separateVoiceInstrumental(audio_normalized_path)
    novocal_path = mp3_to_wav(novocal_path, "novocal")
    vocal_path = mp3_to_wav(vocal_path, "vocal")

    # Transcribe only the vocal track, forcing Spanish.
    result = fast_transcription(vocal_path, model, "es")
    out = [str(s["start"]) + " " + s["text"] for s in result["segments"]]
    transcript = "\n".join(out)

    # Persist the transcript to a text file.
    nombre_archivo = guardar_en_archivo(out)

    return (audio_path, audio_normalized_path, vocal_path, novocal_path,
            nombre_archivo, transcript, json.dumps(result))
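

# fast_transcription is assumed (from the call sites above) to return an
# openai-whisper style dict, roughly:
#   {"segments": [{"start": 0.0, "end": 3.2, "text": "..."}, ...]}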
def transcribeWhisperX(audiofile, model, language, patience,
                       initial_prompt, condition_on_previous_text, temperature,
                       compression, logprob, no_speech_threshold):
    audio_path = audiofile[0].name

    audio_normalized_path = normalizeAudio(audio_path, ".wav")
    novocal_path, vocal_path = separateVoiceInstrumental(audio_normalized_path)
    novocal_path = mp3_to_wav(novocal_path, "novocal")
    vocal_path = mp3_to_wav(vocal_path, "vocal")

    # doWhisperX covers transcription, alignment and speaker diarization in
    # one call; the decoding parameters above (language, patience, etc.) are
    # accepted from the UI but not wired into it yet. An earlier inline
    # whisperx.load_align_model/whisperx.align pass was superseded by it.
    result_aligned, result_speakers, diarize_segments = doWhisperX(vocal_path, whisper_model=model)

    # Name the SRT file after the current timestamp.
    fecha_actual = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    nombre_archivo = f"transcription_{fecha_actual}.srt"
    file_path = Path(nombre_archivo)

    # Write the aligned segments as SRT into the working directory; the
    # writer derives "<stem>.srt" from the path it is given.
    writer_args = {"highlight_words": None, "max_line_count": None, "max_line_width": None}
    srt_writer = get_writer("srt", Path("."))
    srt_writer(result_aligned, str(file_path.stem), writer_args)

    return (audio_path, audio_normalized_path, vocal_path, novocal_path,
            str(file_path), guardar_dataframe_en_csv(diarize_segments),
            json.dumps(result_speakers))
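

# doWhisperX is assumed, from how its results are used, to return:
#   result_aligned   - dict with word-aligned segments (what the SRT writer needs)
#   result_speakers  - the same segments with speaker labels attached
#   diarize_segments - a pandas DataFrame of diarization turns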
transcribeI = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.File(label="Upload Files", file_count="multiple"),
        gr.Radio(["base", "small", "medium", "large-v2"], label="Models", value="large-v2"),
    ],
    outputs=[
        gr.Audio(type="filepath", label="original"),
        gr.Audio(type="filepath", label="normalized"),
        gr.Audio(type="filepath", label="vocal"),
        gr.Audio(type="filepath", label="no_vocal"),
        gr.File(label="Generated file"),
        gr.TextArea(label="Transcription"),
        gr.JSON(label="JSON Output"),
    ],
    theme="huggingface",
    title="Transcription with Whisper",
    description=(
        "This page transcribes audio with Whisper and adds several utilities: "
        "a) audio preprocessing and background-noise removal, "
        "b) conversion of the audio files to a Whisper-compatible format, "
        "c) word-level timestamps, "
        "d) a confidence estimate for the transcription, "
        "e) export of the result to .csv, .srt and .ass.\n"
    ),
    allow_flagging="never",
    # One example row per input (files, model).
    examples=[["Espana 04 - Video 01 - extracto 2 min.wav", "large-v2"]],
)

transcribeII = gr.Interface(
    fn=transcribeWhisperX,
    inputs=[
        gr.File(label="Upload Files", file_count="multiple"),
        gr.Radio(["base", "small", "medium", "large-v2"], label="Model", value="large-v2"),
        gr.Dropdown(["Any", "es", "en", "fr", "pt"], label="Language", value="Any"),
        gr.Slider(minimum=0, maximum=1, label="Patience (Whisper parameter)", value=0.5,
                  info="Optional patience value for beam decoding, as in https://arxiv.org/abs/2204.05424; 1.0 is equivalent to conventional beam search"),
        gr.Textbox(label="Initial Prompt (Whisper parameter)", value=""),
        gr.Textbox(label="Condition on previous text (Whisper parameter)", value=""),
        gr.Slider(minimum=0, maximum=1, label="Temperature (Whisper parameter)", value=0.5,
                  info="Temperature to use for sampling"),
        gr.Slider(minimum=0, maximum=1, label="Compression Ratio Threshold (Whisper parameter)", value=0.5),
        gr.Slider(minimum=0, maximum=1, label="Logprob Threshold (Whisper parameter)", value=0.5),
        gr.Slider(minimum=0, maximum=1, label="No Speech Threshold (Whisper parameter)", value=0.5),
    ],
    # One component per value returned by transcribeWhisperX().
    outputs=[
        gr.Audio(type="filepath", label="original"),
        gr.Audio(type="filepath", label="normalized"),
        gr.Audio(type="filepath", label="vocal"),
        gr.Audio(type="filepath", label="no_vocal"),
        gr.File(label="Generated SRT file"),
        gr.File(label="Diarization CSV"),
        gr.JSON(label="JSON Output"),
    ],
    theme="huggingface",
    title="Transcription with WhisperX",
    description=(
        "This page transcribes and diarizes audio with WhisperX and adds several utilities: "
        "a) audio preprocessing and background-noise removal, "
        "b) conversion of the audio files to a Whisper-compatible format, "
        "c) word-level timestamps, "
        "d) a confidence estimate for the transcription, "
        "e) export of the result to .csv, .srt and .ass.\n"
    ),
    allow_flagging="never",
    #examples=[[None, "COSER-4004-01-00_5m.wav", "large-v2"]]
)
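
# Note: the decoding sliders above all use a 0..1 range, while upstream
# Whisper defaults are temperature=0.0, compression_ratio_threshold=2.4,
# logprob_threshold=-1.0 and no_speech_threshold=0.6, so several defaults
# here fall outside Whisper's usual values; none of them are forwarded to
# the model yet.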

demo = gr.Blocks()
with demo:
    gr.Markdown("# Amanuensis. Audio transcription based on OpenAI Whisper.")
    gr.TabbedInterface([transcribeI, transcribeII],
                       ["Transcription with Whisper", "Transcription and diarization with WhisperX"])

#demo.queue(concurrency_count=1).launch(enable_queue=True, auth=(os.environ['USER'], os.environ['PASSWORD']))
demo.launch(enable_queue=True)
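
# The commented-out launch line above would additionally need `import os`
# and USER/PASSWORD environment variables; demo.queue(...) turns on request
# queuing, which long transcriptions generally need.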