Update app.py
app.py CHANGED
@@ -16,7 +16,7 @@ def record_opt(msg):
     return f"{date_now()} {msg}\n"
 
 
-def speech_recognize(audio, model_name, hf_token, opt):
+def speech_recognize(audio, model_name, access_token, opt):
     opt += record_opt("Transcription starts ...")
     yield "Transcribing, please wait..", opt
     start = time.monotonic()
@@ -26,7 +26,7 @@ def speech_recognize(audio, model_name, hf_token, opt):
     try:
         url = API_URL + model_name
         print(f">>> url is {url}")
-        headers = {"Authorization": f"Bearer {hf_token}"}
+        headers = {"Authorization": f"Bearer {access_token}"}
         response = requests.request("POST", url, headers=headers, data=data)
         text = json.loads(response.content.decode("utf-8"))
         print(f">>> text is {text}")
@@ -48,7 +48,7 @@ with gr.Blocks() as demo:
 
         👉 The purpose is to practice using the Gradio Audio component and explore using the Huggingface Inference API
 
-        > 💡Tip: You need to fill in the Huggingface token to call the Huggingface Inference API
+        > 💡Tip: You need to fill in the Huggingface access token to call the Huggingface Inference API
         """
     )
     with gr.Row():
@@ -67,7 +67,7 @@ with gr.Blocks() as demo:
                 ],
                 value="openai/whisper-large-v3",
             )
-            hf_token = gr.Textbox(label="Huggingface token")
+            access_token = gr.Textbox(label="Huggingface access token")
         with gr.Column():
             output = gr.Textbox(label="Transcription results")
             operation = gr.Textbox(label="Component operation history")
@@ -91,7 +91,7 @@ with gr.Blocks() as demo:
         lambda x: x + record_opt("Finished playing"),
         inputs=operation, outputs=operation
     )
-    audio.stop_recording(speech_recognize, inputs=[audio, model_name, hf_token, operation], outputs=[output, operation])
+    audio.stop_recording(speech_recognize, inputs=[audio, model_name, access_token, operation], outputs=[output, operation])
 
 demo.queue(max_size=4, concurrency_count=4)
 demo.launch()
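
For readers following the change: the renamed access_token is what authorizes the Inference API call inside speech_recognize. A minimal, self-contained sketch of that request pattern follows, assuming the standard https://api-inference.huggingface.co/models/ base URL and raw audio bytes as the POST body; transcribe_file and the sample values are illustrative, not taken from app.py.

import json
import requests

# Assumed Inference API base; app.py's API_URL is expected to be equivalent.
API_URL = "https://api-inference.huggingface.co/models/"

def transcribe_file(audio_path, model_name, access_token):
    """Send raw audio bytes to the Huggingface Inference API and return the transcript."""
    with open(audio_path, "rb") as f:
        data = f.read()  # the API accepts the raw audio file as the request body
    headers = {"Authorization": f"Bearer {access_token}"}
    response = requests.post(API_URL + model_name, headers=headers, data=data)
    result = json.loads(response.content.decode("utf-8"))
    # Successful responses look like {"text": "..."}; errors come back as {"error": "..."}.
    return result.get("text", result.get("error", ""))

# Hypothetical usage:
# print(transcribe_file("sample.wav", "openai/whisper-large-v3", "hf_xxx"))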
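
The stop_recording hookup also depends on speech_recognize being a generator: each yield pushes an intermediate update to the two output textboxes, which is how the "Transcribing, please wait.." message appears before the final result. A stripped-down sketch of that pattern, with illustrative component names and a sleep standing in for the API round trip:

import time
import gradio as gr

def fake_transcribe(audio, history):
    # First yield: show a progress message immediately after recording stops.
    history += "Transcription starts ...\n"
    yield "Transcribing, please wait..", history
    time.sleep(1)  # stand-in for the real Inference API call
    # Final yield: replace the placeholder with the result.
    yield "hello world", history + "Transcription finished\n"

with gr.Blocks() as demo:
    audio = gr.Audio(type="filepath", label="Record audio")
    output = gr.Textbox(label="Transcription results")
    operation = gr.Textbox(label="Component operation history")
    # stop_recording fires when the user stops a microphone recording.
    audio.stop_recording(fake_transcribe, inputs=[audio, operation],
                         outputs=[output, operation])

demo.launch()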