cstr committed
Commit da9138c
1 Parent(s): 537c9a7

Update app.py

Files changed (1): app.py +52 -5

app.py CHANGED
@@ -3,22 +3,61 @@ import os
 import time
 import sys
 import subprocess
+import tempfile
+import requests
+from urllib.parse import urlparse

 # Clone and install faster-whisper from GitHub
 subprocess.run(["git", "clone", "https://github.com/SYSTRAN/faster-whisper.git"], check=True)
 subprocess.run(["pip", "install", "-e", "./faster-whisper"], check=True)
+subprocess.run(["pip", "install", "yt-dlp"], check=True)

 # Add the faster-whisper directory to the Python path
 sys.path.append("./faster-whisper")

 from faster_whisper import WhisperModel
 from faster_whisper.transcribe import BatchedInferencePipeline
+import yt_dlp

-def transcribe_audio(audio_path, batch_size):
+def download_audio(url):
+    parsed_url = urlparse(url)
+    if parsed_url.netloc == 'www.youtube.com' or parsed_url.netloc == 'youtu.be':
+        # YouTube video
+        ydl_opts = {
+            'format': 'bestaudio/best',
+            'postprocessors': [{
+                'key': 'FFmpegExtractAudio',
+                'preferredcodec': 'mp3',
+                'preferredquality': '192',
+            }],
+            'outtmpl': '%(id)s.%(ext)s',
+        }
+        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+            info = ydl.extract_info(url, download=True)
+            return f"{info['id']}.mp3"
+    else:
+        # Direct MP3 URL
+        response = requests.get(url)
+        if response.status_code == 200:
+            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_file:
+                temp_file.write(response.content)
+                return temp_file.name
+        else:
+            raise Exception(f"Failed to download audio from {url}")
+
+def transcribe_audio(input_source, batch_size):
     # Initialize the model
     model = WhisperModel("cstr/whisper-large-v3-turbo-int8_float32", device="auto", compute_type="int8")
     batched_model = BatchedInferencePipeline(model=model)

+    # Handle input source
+    if isinstance(input_source, str) and (input_source.startswith('http://') or input_source.startswith('https://')):
+        # It's a URL, download the audio
+        audio_path = download_audio(input_source)
+    else:
+        # It's a local file path
+        audio_path = input_source
+
     # Benchmark transcription time
     start_time = time.time()
     segments, info = batched_model.transcribe(audio_path, batch_size=batch_size)
@@ -42,19 +81,27 @@ def transcribe_audio(audio_path, batch_size):
     output += f"Real-time factor: {real_time_factor:.2f}x\n"
     output += f"Audio file size: {audio_file_size:.2f} MB"

+    # Clean up downloaded file if it was a URL
+    if isinstance(input_source, str) and (input_source.startswith('http://') or input_source.startswith('https://')):
+        os.remove(audio_path)
+
     return output

 # Gradio interface
 iface = gr.Interface(
     fn=transcribe_audio,
     inputs=[
-        gr.Audio(type="filepath", label="Upload Audio File"),
+        gr.Textbox(label="Audio Source (Upload, MP3 URL, or YouTube URL)"),
         gr.Slider(minimum=1, maximum=32, step=1, value=16, label="Batch Size")
     ],
     outputs=gr.Textbox(label="Transcription and Metrics"),
-    title="Faster Whisper Transcription",
-    description="Upload an audio file to transcribe using Faster Whisper v3 turbo int8. Adjust the batch size for performance tuning.",
-    examples=[["path/to/example/audio.mp3", 16]],
+    title="Faster Whisper v3 turbo int8 transcription",
+    description="Enter an audio file path, MP3 URL, or YouTube URL to transcribe using Faster Whisper v3 turbo (int8). Adjust the batch size for performance tuning.",
+    examples=[
+        ["https://www.youtube.com/watch?v=dQw4w9WgXcQ", 16],
+        ["https://example.com/path/to/audio.mp3", 16],
+        ["path/to/local/audio.mp3", 16]
+    ],
 )

 iface.launch()
 
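A note on the direct-download branch above: requests.get(url) buffers the entire response in memory through response.content before anything reaches disk, which can be costly for long recordings. A minimal streaming variant is sketched below; it is not part of the commit, and the helper name download_direct_mp3, the 8 KiB chunk size, and the 30-second timeout are illustrative choices.

import tempfile
import requests

def download_direct_mp3(url):
    # Stream the body so large files are never held in memory all at once.
    with requests.get(url, stream=True, timeout=30) as response:
        response.raise_for_status()  # raise on any non-2xx status instead of checking status_code by hand
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_file:
            for chunk in response.iter_content(chunk_size=8192):
                temp_file.write(chunk)
        return temp_file.name

The YouTube branch also has an external dependency worth noting: the FFmpegExtractAudio postprocessor only works when an ffmpeg binary is available on the PATH of the Space.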
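The metric variables that appear as context in the second hunk (real_time_factor, audio_file_size) are computed in the stretch of app.py that the diff elides (the lines between the two hunks). The sketch below shows how such metrics are commonly derived with faster-whisper; it is not the committed code. benchmark_transcription is a hypothetical name, info.duration is the library's reported audio duration, and the real-time-factor convention (audio duration divided by processing time, so higher is faster) is an assumption since the committed formula is not visible.

import os
import time

def benchmark_transcription(batched_model, audio_path, batch_size=16):
    start_time = time.time()
    segments, info = batched_model.transcribe(audio_path, batch_size=batch_size)
    # transcribe() returns a lazy generator: the model only runs while the
    # segments are consumed, so collect the text before stopping the clock.
    text = " ".join(segment.text for segment in segments)
    transcription_time = time.time() - start_time
    real_time_factor = info.duration / transcription_time  # assumed convention, e.g. 10x real time
    audio_file_size = os.path.getsize(audio_path) / (1024 * 1024)  # size in MB
    return text, transcription_time, real_time_factor, audio_file_size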