StevenChen16 committed
Commit · 69b1e14
1 Parent(s): f13acc2
first update
Files changed:
- app.py +20 -4
- requirements.txt +2 -0
app.py CHANGED
@@ -1,7 +1,23 @@
 import gradio as gr
+import whisperx
+import whisper
 
-def …
-…
+def transcribe(audio_file):
+    device = "cuda" if torch.cuda.is_available() else "cpu"
 
-…
-…
+    # Transcribe with original Whisper
+    model = whisper.load_model("large", device)
+    result = model.transcribe(audio_file)
+
+    # Load alignment model and metadata
+    model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
+
+    # Align Whisper output
+    result_aligned = whisperx.align(result["segments"], model_a, metadata, audio_file, device)
+
+    return {"aligned": result_aligned["segments"], "word_segments": result_aligned["word_segments"]}
+
+inputs = gr.inputs.Audio(source="upload", type="filepath")
+outputs = gr.outputs.JSON()
+
+gr.Interface(fn=transcribe, inputs=inputs, outputs=outputs).launch()
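Note: the new app.py reads torch.cuda.is_available() but never imports torch, so the Space will raise a NameError the first time transcribe() runs. A minimal corrected sketch follows; it is not part of the commit, it simply repeats the committed code with the missing import added, and it assumes a Gradio release that still provides the gr.inputs / gr.outputs namespaces used above (newer versions use gr.Audio and gr.JSON instead).

# Sketch only: the committed app.py plus the missing `import torch`.
import gradio as gr
import torch
import whisperx
import whisper

def transcribe(audio_file):
    # Use the GPU when available; alignment also runs on CPU, just more slowly.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Transcribe with original Whisper ("large" as in the commit; a smaller
    # checkpoint such as "base" is kinder to a CPU-only Space).
    model = whisper.load_model("large", device)
    result = model.transcribe(audio_file)

    # Load the language-specific alignment model, then align word timestamps.
    model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
    result_aligned = whisperx.align(result["segments"], model_a, metadata, audio_file, device)

    return {"aligned": result_aligned["segments"], "word_segments": result_aligned["word_segments"]}

inputs = gr.inputs.Audio(source="upload", type="filepath")
outputs = gr.outputs.JSON()

gr.Interface(fn=transcribe, inputs=inputs, outputs=outputs).launch()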
requirements.txt ADDED
@@ -0,0 +1,2 @@
+gradio
+git+https://github.com/m-bain/whisperx.git
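app.py also imports whisper and torch directly, while requirements.txt pins only gradio and the whisperx repository, relying on whisperx to pull in the rest. If the installed whisperx revision does not declare torch and openai-whisper as dependencies, a more explicit (hypothetical) requirements.txt would be:

gradio
torch
openai-whisper
git+https://github.com/m-bain/whisperx.git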