Kamtera committed
Commit
d3e1b2e
1 Parent(s): eb5fb7a

Update app.py

Files changed (1)
  1. app.py +6 -4
app.py CHANGED
@@ -99,7 +99,7 @@ function update() {
   var INLINE_JSON='''
 html_seeker2=''';update();
 </script>'''
-
+'''
 model_name = "voidful/wav2vec2-xlsr-multilingual-56"
 model0 = pipeline(task="automatic-speech-recognition",
                   model=model_name)
@@ -111,15 +111,17 @@ model2 = pipeline(task="automatic-speech-recognition",
 model_name = "ghofrani/common8"
 model1 = pipeline(task="automatic-speech-recognition",
                   model=model_name)
-
+'''
 import json
 def predict_fa(speech,model):
-    if model== "SLPL/Sharif-wav2vec2":
+    '''if model== "SLPL/Sharif-wav2vec2":
         text = model2(speech,return_timestamps="word" )
     elif model== "ghofrani/common8":
         text = model1(speech,return_timestamps="word" )
     elif model== "voidful/wav2vec2-xlsr-multilingual-56":
         text = model0(speech,return_timestamps="word" )
+    '''
+    text={"text": "\u0627\u06cc\u0646\u0627\u0646 \u06a9\u0631\u0627\u0644\u0627\u0644 \u0648 \u06a9\u0648\u0631\u0646\u062f \u0648 \u0644\u0632\u0627 \u0627\u0632 \u06af\u0645\u0631\u0627\u0647\u06cc \u0628\u0647 \u0631\u0627\u0647 \u0628\u0627\u0632 \u0646\u0645\u06cc\u06a9\u0631\u062f\u0646\u062f", "chunks": [{"text": "\u0627\u06cc\u0646\u0627\u0646", "timestamp": [0.0, 0.72]}, {"text": "\u06a9\u0631\u0627\u0644\u0627\u0644", "timestamp": [0.92, 1.6]}, {"text": "\u0648", "timestamp": [1.72, 1.74]}, {"text": "\u06a9\u0648\u0631\u0646\u062f", "timestamp": [1.9, 2.54]}, {"text": "\u0648", "timestamp": [2.76, 2.78]}, {"text": "\u0644\u0632\u0627", "timestamp": [2.88, 3.16]}, {"text": "\u0627\u0632", "timestamp": [3.4, 3.5]}, {"text": "\u06af\u0645\u0631\u0627\u0647\u06cc", "timestamp": [3.64, 4.3]}, {"text": "\u0628\u0647", "timestamp": [4.6, 4.68]}, {"text": "\u0631\u0627\u0647", "timestamp": [4.78, 5.12]}, {"text": "\u0628\u0627\u0632", "timestamp": [5.3, 5.58]}, {"text": "\u0646\u0645\u06cc\u06a9\u0631\u062f\u0646\u062f", "timestamp": [5.68, 7.14]}]}
     return [text['text'],json.dumps(text),html_seeker+speech+html_seeker1+json.dumps(text)+html_seeker2]
 
 
@@ -160,7 +162,7 @@ with gr.Blocks() as demo:
         inputs_model_fa =gr.inputs.Radio(label="Language", choices=["ghofrani/common8","SLPL/Sharif-wav2vec2","voidful/wav2vec2-xlsr-multilingual-56"])
         output_transcribe1_fa = gr.Textbox(label="Transcribed text:")
         output_transcribe1_fa1 = gr.Textbox(label="Transcribed text with timestamps:")
-        output_transcribe1_fa2 =gr.HTML("")
+        output_transcribe1_fa2 =gr.HTML(label="")
         transcribe_audio1_fa= gr.Button("Submit")
         with gr.Tab("google"):
             gr.Markdown("set your speech language")
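This commit wraps the model-loading and model-selection code in triple-quoted strings (effectively commenting it out) and has predict_fa return a fixed sample transcription instead; the gr.HTML output component now receives label="" rather than an empty value. For reference, below is a minimal sketch of the word-timestamp pipeline call that the disabled branches were making. It is an illustration, not code from this commit: the model id matches one used in app.py, but "sample.wav" is a placeholder audio path.

# Minimal sketch (not part of this commit): how the commented-out branches
# obtained word-level timestamps from the transformers ASR pipeline.
from transformers import pipeline

asr = pipeline(task="automatic-speech-recognition", model="ghofrani/common8")
result = asr("sample.wav", return_timestamps="word")
# result has the same shape as the hard-coded `text` dict in the diff above:
# {"text": "...", "chunks": [{"text": "...", "timestamp": (0.0, 0.72)}, ...]}
print(result["text"])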