Commit d6e7a74 · Update app.py
Parent(s): e5fc112

app.py CHANGED
@@ -1,78 +1,22 @@
-import os
-os.system('/usr/local/bin/python -m pip install --upgrade pip')
-os.system("pip install git+https://github.com/openai/whisper.git")
-os.system("pip install gradio --upgrade")
 import gradio as gr
 import whisper

+# Load the Whisper model
 model = whisper.load_model("small")

 def inference(audio):
-    mel = whisper.log_mel_spectrogram(audio).to(model.device)
-    _, probs = model.detect_language(mel)
-    options = whisper.DecodingOptions(fp16 = False)
-    result = whisper.decode(model, mel, options)
-    print(result.text)
-    return result.text, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
+    result = model.transcribe(audio)
+    return result["text"]

-css = "footer {visibility: hidden}"
-
-block = gr.Blocks(css=css)
-
-with block:
-    gr.HTML(
-        """
-        <div style="text-align: center; max-width: 650px; margin: 0 auto;">
-          <div
-            style="
-              display: inline-flex;
-              align-items: center;
-              gap: 0.8rem;
-              font-size: 1.75rem;
-            "
-          >
-            <h1 style="font-weight: 900; margin-bottom: 7px;">
-            </h1>
-          </div>
-        </div>
-        """
-    )
+# Create the Gradio interface
+with gr.Blocks() as block:
     with gr.Group():
         with gr.Box():
             with gr.Row().style(mobile_collapse=False, equal_height=True):
-                audio = gr.Audio(
-                    source="microphone",
-                    type="filepath"
-                )
-                btn = gr.Button("Transcribir")
-            text = gr.Textbox(show_label=False, elem_id="result-textarea")
-            with gr.Group(elem_id="share-btn-container"):
-                loading_icon = gr.HTML(loading_icon_html, visible=False)
+                audio_input = gr.Audio(label="Input Audio", source="microphone", type="filepath")
+                transcribe_button = gr.Button("Transcribir")
+            transcription_output = gr.Textbox(label="Transcription")

-        btn.click(inference, inputs=[audio], outputs=[text, loading_icon])
+    transcribe_button.click(inference, inputs=[audio_input], outputs=[transcription_output])

 block.launch()
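In short, the commit removes the os.system(...) pip installs that ran on every startup, drops the custom HTML header and the hidden share-button widgets (the old inference() returned four outputs while btn.click() wired only two, and loading_icon_html was never defined in the file), and replaces the manual log_mel_spectrogram → detect_language → decode pipeline with a single model.transcribe() call, which performs those steps internally and also chunks audio longer than the 30-second decode window. For reference, here is the new app.py assembled from the "+" lines above — a sketch, not an authoritative copy: indentation is inferred from the diff context, and gr.Box / Row().style(...) assume the Gradio 3.x API the Space was already using.

# app.py after this commit (assembled from the "+" lines; indentation inferred)
import gradio as gr
import whisper

# Load the Whisper model once at startup
model = whisper.load_model("small")

def inference(audio):
    # transcribe() loads the file, computes the mel spectrogram, detects the
    # language, and decodes, chunking audio longer than 30 seconds
    result = model.transcribe(audio)
    return result["text"]

# Create the Gradio interface (gr.Box and .style() are Gradio 3.x APIs)
with gr.Blocks() as block:
    with gr.Group():
        with gr.Box():
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                audio_input = gr.Audio(label="Input Audio", source="microphone", type="filepath")
                transcribe_button = gr.Button("Transcribir")
            transcription_output = gr.Textbox(label="Transcription")

    transcribe_button.click(inference, inputs=[audio_input], outputs=[transcription_output])

block.launch()

Run it locally with `python app.py`; Gradio serves the app on http://localhost:7860 by default.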
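With the runtime pip installs gone, a Space normally gets its dependencies from a requirements.txt installed once at build time. A plausible equivalent for this app would be the following — hypothetical, since no such file appears in this commit:

# requirements.txt (hypothetical; not part of this diff)
git+https://github.com/openai/whisper.git
gradio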