# Hugging Face Space (running on ZeroGPU): long-form Japanese transcription
# and speaker diarization demo built on Kotoba-Whisper v2.2.
import os
from math import floor
from typing import Optional
import spaces
import torch
import gradio as gr
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read
# config
model_name = "kotoba-tech/kotoba-whisper-v2.2"
example_file = "sample_diarization_japanese.mp3"
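# NOTE: the example clip is assumed to sit next to this file in the Space repo.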
# device setting
if torch.cuda.is_available():
    torch_dtype = torch.bfloat16
    device = "cuda"
    model_kwargs = {'attn_implementation': 'sdpa'}
else:
    torch_dtype = torch.float32
    device = "cpu"
    model_kwargs = {}
# define the pipeline
pipe = pipeline(
    model=model_name,
    chunk_length_s=15,
    batch_size=16,
    torch_dtype=torch_dtype,
    device=device,
    model_kwargs=model_kwargs,
    trust_remote_code=True
)
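# Whisper feature extractors expect 16 kHz mono audio; reusing the extractor's
# rate below keeps ffmpeg decoding in sync with the model.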
sampling_rate = pipe.feature_extractor.sampling_rate
def format_time(start: Optional[float], end: Optional[float]):
    def _format_time(seconds: Optional[float]):
        if seconds is None:
            return "[no timestamp available]"
        # Round to the nearest tenth of a second, then split into total
        # minutes, whole seconds, and tenths (MM:SS.m).
        tenths = floor(seconds * 10 + 0.5)
        minutes, tenths = divmod(tenths, 600)
        seconds, tenths = divmod(tenths, 10)
        return f'{minutes:02}:{seconds:02}.{tenths:01}'
    return f"[{_format_time(start)} -> {_format_time(end)}]:"
@spaces.GPU
def get_prediction(inputs):
    # On ZeroGPU Spaces, the decorator attaches a GPU only for the duration of this call.
    return pipe(inputs, generate_kwargs={"language": "ja", "task": "transcribe"})
def transcribe(inputs: str):
    if inputs is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
    with open(inputs, "rb") as f:
        inputs = f.read()
    # Decode the uploaded bytes with ffmpeg, resampled to the model's rate.
    prediction = get_prediction({"array": ffmpeg_read(inputs, sampling_rate), "sampling_rate": sampling_rate})
    # Render one Markdown section per detected speaker with timestamped chunks.
    output = ""
    for n, s in enumerate(prediction["speakers"]):
        text_timestamped = "\n".join(f"- **{format_time(*c['timestamp'])}** {c['text']}" for c in prediction[f"chunks/{s}"])
        output += f"### Speaker {n + 1}\n{text_timestamped}\n"
    return output
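# Illustrative output shape (values are hypothetical):
# ### Speaker 1
# - **[00:00.0 -> 00:02.1]:** こんにちは。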
description = (f"Transcribe and diarize long-form microphone or audio inputs with the click of a button! Demo uses "
f"Kotoba-Whisper [{model_name}](https://huggingface.co/{model_name}).")
title = f"Audio Transcription and Diarization with {os.path.basename(model_name)}"
shared_config = {"fn": transcribe, "title": title, "description": description, "allow_flagging": "never", "examples": [example_file]}
i_upload = gr.Interface(
    inputs=[gr.Audio(sources="upload", type="filepath", label="Audio file")], outputs=gr.Markdown(), **shared_config
)
i_mic = gr.Interface(
    inputs=[gr.Audio(sources="microphone", type="filepath", label="Microphone input")], outputs=gr.Markdown(), **shared_config
)
with gr.Blocks() as demo:
    gr.TabbedInterface([i_upload, i_mic], ["Audio file", "Microphone"])

# Queue requests (up to 40 concurrent) and keep the auto-generated API hidden.
demo.queue(api_open=False, default_concurrency_limit=40).launch(show_api=False, show_error=True)