import gradio as gr
import os
import time
import sys
import subprocess
import tempfile
import requests
from urllib.parse import urlparse

# Clone and install faster-whisper from GitHub (skip the clone if the checkout already exists,
# so re-running the script does not fail on `git clone`)
if not os.path.exists("./faster-whisper"):
    subprocess.run(["git", "clone", "https://github.com/SYSTRAN/faster-whisper.git"], check=True)
subprocess.run([sys.executable, "-m", "pip", "install", "-e", "./faster-whisper"], check=True)
subprocess.run([sys.executable, "-m", "pip", "install", "yt-dlp"], check=True)

# Add the faster-whisper directory to the Python path
sys.path.append("./faster-whisper")

from faster_whisper import WhisperModel
from faster_whisper.transcribe import BatchedInferencePipeline
import yt_dlp

def download_audio(url):
    parsed_url = urlparse(url)
    if parsed_url.netloc in ('www.youtube.com', 'youtube.com', 'm.youtube.com', 'youtu.be'):
        # YouTube video
        ydl_opts = {
            'format': 'bestaudio/best',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192',
            }],
            'outtmpl': '%(id)s.%(ext)s',
        }
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            info = ydl.extract_info(url, download=True)
            return f"{info['id']}.mp3"
    else:
        # Direct MP3 URL
        response = requests.get(url, timeout=60)
        if response.status_code == 200:
            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_file:
                temp_file.write(response.content)
                return temp_file.name
        else:
            raise Exception(f"Failed to download audio from {url} (HTTP {response.status_code})")

def transcribe_audio(input_source, batch_size):
    # Initialize the model
    model = WhisperModel("cstr/whisper-large-v3-turbo-int8_float32", device="auto", compute_type="int8")
    batched_model = BatchedInferencePipeline(model=model)

    # Handle input source: download the audio if it is a URL, otherwise treat it as a local file path
    is_url = isinstance(input_source, str) and input_source.startswith(("http://", "https://"))
    if is_url:
        audio_path = download_audio(input_source)
    else:
        audio_path = input_source

    # Benchmark transcription time
    start_time = time.time()
    segments, info = batched_model.transcribe(audio_path, batch_size=batch_size)
    end_time = time.time()

    # Generate transcription
    transcription = ""
    for segment in segments:
        transcription += f"[{segment.start:.2f}s -> {segment.end:.2f}s] {segment.text}\n"

    # Calculate metrics
    transcription_time = end_time - start_time
    real_time_factor = info.duration / transcription_time
    audio_file_size = os.path.getsize(audio_path) / (1024 * 1024)  # Size in MB

    # Prepare output
    output = f"Transcription:\n\n{transcription}\n"
    output += f"\nLanguage: {info.language}, Probability: {info.language_probability:.2f}\n"
    output += f"Duration: {info.duration:.2f}s, Duration after VAD: {info.duration_after_vad:.2f}s\n"
    output += f"Transcription time: {transcription_time:.2f} seconds\n"
    output += f"Real-time factor: {real_time_factor:.2f}x\n"
    output += f"Audio file size: {audio_file_size:.2f} MB"

    # Clean up the downloaded file if the input was a URL
    if is_url:
        os.remove(audio_path)

    return output

# Gradio interface
iface = gr.Interface(
    fn=transcribe_audio,
    inputs=[
        gr.Textbox(label="Audio Source (local file path, MP3 URL, or YouTube URL)"),
        gr.Slider(minimum=1, maximum=32, step=1, value=16, label="Batch Size")
    ],
    outputs=gr.Textbox(label="Transcription and Metrics"),
    title="Faster Whisper v3 turbo int8 transcription",
    description="Enter an audio file path, MP3 URL, or YouTube URL to transcribe using Faster Whisper v3 turbo (int8). Adjust the batch size for performance tuning.",
    examples=[
        ["https://www.youtube.com/watch?v=dQw4w9WgXcQ", 16],
        ["https://example.com/path/to/audio.mp3", 16],
        ["path/to/local/audio.mp3", 16]
    ],
)

iface.launch()