Update app.py
app.py CHANGED
@@ -22,7 +22,7 @@ pipe = pipeline(task="automatic-speech-recognition", model="cdactvm/English_Mode
 
 
 def transcribe_english(audio):
-    #
+    # Process the audio file
     transcript = pipe(audio)
     text_value = transcript['text']
     cleaned_text=text_value.replace("<s>", "")
@@ -32,21 +32,6 @@ def transcribe_english(audio):
     converted_text=text_to_int(replaced_words)
     return converted_text
 
-# def sel_lng(lng, mic=None, file=None):
-#     if mic is not None:
-#         audio = mic
-#     elif file is not None:
-#         audio = file
-#     else:
-#         return "You must either provide a mic recording or a file"
-
-#     if lng == "model_1":
-#         return transcribe_hindi_old(audio)
-#     elif lng == "model_2":
-#         return transcribe_hindi_new(audio)
-#     elif lng== "model_3":
-#         return transcribe_hindi_lm(audio)
-
 demo=gr.Interface(
     transcribe_english,
     inputs=[
@@ -58,18 +43,3 @@ demo=gr.Interface(
     title="Automatic Speech Recognition",
     description = "Demo for Automatic Speech Recognition. Use microphone to record speech. Please press Record button. Initially it will take some time to load the model. The recognized text will appear in the output textbox",
 ).launch()
-
-# demo=gr.Interface(
-#     fn=sel_lng,
-
-#     inputs=[
-#         gr.Dropdown([
-#             "model_1","model_2","model_3"],label="Select Model"),
-#         gr.Audio(sources=["microphone","upload"], type="filepath"),
-#     ],
-#     outputs=[
-#         "textbox"
-#     ],
-#     title="Automatic Speech Recognition",
-#     description = "Demo for Automatic Speech Recognition. Use microphone to record speech. Please press Record button. Initially it will take some time to load the model. The recognized text will appear in the output textbox",
-# ).launch()
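
For context, a minimal runnable sketch of the pattern the Space is left with after this commit. It is not the exact app.py: the model id in the hunk header is truncated, so a public ASR checkpoint is used as a stand-in; the number-word normalisation helpers (replaced_words, text_to_int) are omitted; and because the active Interface's inputs list is not fully shown in the diff, the gr.Audio call is borrowed from the removed commented-out block.

# Minimal sketch only: stand-in checkpoint, number-word normalisation omitted.
import gradio as gr
from transformers import pipeline

# Stand-in checkpoint; the Space's own model id is truncated in the diff header above.
pipe = pipeline(task="automatic-speech-recognition", model="openai/whisper-tiny.en")

def transcribe_english(audio):
    # Process the audio file and strip the "<s>" markers, as in the diff
    transcript = pipe(audio)
    text_value = transcript["text"]
    cleaned_text = text_value.replace("<s>", "")
    return cleaned_text  # the full app additionally converts number words to digits

demo = gr.Interface(
    transcribe_english,
    inputs=[gr.Audio(sources=["microphone", "upload"], type="filepath")],
    outputs=["textbox"],
    title="Automatic Speech Recognition",
    description="Demo for Automatic Speech Recognition. Use microphone to record speech.",
)
demo.launch()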