almncarlo committed
Commit 0b1b9ce · verified · 1 Parent(s): 1c5768e

Update app.py

Files changed (1)
  1. app.py +254 -21
app.py CHANGED
@@ -1,29 +1,262 @@
  import gradio as gr
  import torch
- from transformers import pipeline, AutoTokenizer
- from nemo.collections.asr.models import EncDecMultiTaskModel

- # load model
- canary_model = EncDecMultiTaskModel.from_pretrained('nvidia/canary-1b')

- # update dcode params
- decode_cfg = canary_model.cfg.decoding
- decode_cfg.beam.beam_size = 1
- canary_model.change_decoding_strategy(decode_cfg)

- pipe = pipeline(
-     "automatic-speech-recognition",
-     model="nvidia/canary-1b"
  )

- # pipe = pipeline(
- #     "text-generation",
- #     model="QuantFactory/Meta-Llama-3-8B-Instruct-GGUF",
- #     model_kwargs={"torch_dtype": torch.bfloat16},
- #     device_map="auto"
- # )

- gr.Interface.from_pipeline(pipe,
-     title="ASR",
-     description="Using pipeline with Canary-1B",
- ).launch(inbrowser=True)
+ # import gradio as gr
+ # import torch
+ # from transformers import pipeline, AutoTokenizer
+ # from nemo.collections.asr.models import EncDecMultiTaskModel
+
+ # # load model
+ # canary_model = EncDecMultiTaskModel.from_pretrained('nvidia/canary-1b')
+
+ # # update dcode params
+ # decode_cfg = canary_model.cfg.decoding
+ # decode_cfg.beam.beam_size = 1
+ # canary_model.change_decoding_strategy(decode_cfg)
+
+ # pipe = pipeline(
+ #     "automatic-speech-recognition",
+ #     model="nvidia/canary-1b"
+ # )
+
+ # # pipe = pipeline(
+ # #     "text-generation",
+ # #     model="QuantFactory/Meta-Llama-3-8B-Instruct-GGUF",
+ # #     model_kwargs={"torch_dtype": torch.bfloat16},
+ # #     device_map="auto"
+ # # )
+
+ # gr.Interface.from_pipeline(pipe,
+ #     title="ASR",
+ #     description="Using pipeline with Canary-1B",
+ # ).launch(inbrowser=True)
+
  import gradio as gr
+ import json
+ import librosa
+ import os
+ import soundfile as sf
+ import tempfile
+ import uuid
+
  import torch
+
+ from nemo.collections.asr.models import ASRModel
+ from nemo.collections.asr.parts.utils.streaming_utils import FrameBatchMultiTaskAED
+ from nemo.collections.asr.parts.utils.transcribe_utils import get_buffered_pred_feat_multitaskAED
+
+ SAMPLE_RATE = 16000  # Hz
+ MAX_AUDIO_MINUTES = 180  # won't try to transcribe if longer than this
+
+ model = ASRModel.from_pretrained("nvidia/canary-1b")
+ model.eval()
+
+ # make sure beam size always 1 for consistency
+ model.change_decoding_strategy(None)
+ decoding_cfg = model.cfg.decoding
+ decoding_cfg.beam.beam_size = 1
+ model.change_decoding_strategy(decoding_cfg)
+
+ # setup for buffered inference
+ model.cfg.preprocessor.dither = 0.0
+ model.cfg.preprocessor.pad_to = 0
+
+ feature_stride = model.cfg.preprocessor['window_stride']
+ model_stride_in_secs = feature_stride * 8  # 8 = model stride, which is 8 for FastConformer
+
+ frame_asr = FrameBatchMultiTaskAED(
+     asr_model=model,
+     frame_len=40.0,
+     total_buffer=40.0,
+     batch_size=16,
  )
+
+ amp_dtype = torch.float16
+
+ def convert_audio(audio_filepath, tmpdir, utt_id):
+     """
+     Convert all files to monochannel 16 kHz wav files.
+     Do not convert and raise error if audio too long.
+     Returns output filename and duration.
+     """
+
+     data, sr = librosa.load(audio_filepath, sr=None, mono=True)
+
+     duration = librosa.get_duration(y=data, sr=sr)
+
+     if duration / 60.0 > MAX_AUDIO_MINUTES:
+         raise gr.Error(
+             f"This demo can transcribe up to {MAX_AUDIO_MINUTES} minutes of audio. "
+             "If you wish, you may trim the audio using the Audio viewer in Step 1 "
+             "(click on the scissors icon to start trimming audio)."
+         )
+
+     if sr != SAMPLE_RATE:
+         data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)
+
+     out_filename = os.path.join(tmpdir, utt_id + '.wav')
+
+     # save output audio
+     sf.write(out_filename, data, SAMPLE_RATE)
+
+     return out_filename, duration
+
+
+ def transcribe(audio_filepath, src_lang, tgt_lang, pnc):
+
+     if audio_filepath is None:
+         raise gr.Error("Please provide some input audio: either upload an audio file or use the microphone")
+
+     utt_id = uuid.uuid4()
+     with tempfile.TemporaryDirectory() as tmpdir:
+         converted_audio_filepath, duration = convert_audio(audio_filepath, tmpdir, str(utt_id))
+
+         # map src_lang and tgt_lang from long versions to short
+         LANG_LONG_TO_LANG_SHORT = {
+             "English": "en",
+             "Spanish": "es",
+             "French": "fr",
+             "German": "de",
+         }
+         if src_lang not in LANG_LONG_TO_LANG_SHORT.keys():
+             raise ValueError(f"src_lang must be one of {LANG_LONG_TO_LANG_SHORT.keys()}")
+         else:
+             src_lang = LANG_LONG_TO_LANG_SHORT[src_lang]
+
+         if tgt_lang not in LANG_LONG_TO_LANG_SHORT.keys():
+             raise ValueError(f"tgt_lang must be one of {LANG_LONG_TO_LANG_SHORT.keys()}")
+         else:
+             tgt_lang = LANG_LONG_TO_LANG_SHORT[tgt_lang]
+
+
+         # infer taskname from src_lang and tgt_lang
+         if src_lang == tgt_lang:
+             taskname = "asr"
+         else:
+             taskname = "s2t_translation"
+
+         # update pnc variable to be "yes" or "no"
+         pnc = "yes" if pnc else "no"
+
+         # make manifest file and save
+         manifest_data = {
+             "audio_filepath": converted_audio_filepath,
+             "source_lang": src_lang,
+             "target_lang": tgt_lang,
+             "taskname": taskname,
+             "pnc": pnc,
+             "answer": "predict",
+             "duration": str(duration),
+         }
+
+         manifest_filepath = os.path.join(tmpdir, f'{utt_id}.json')
+
+         with open(manifest_filepath, 'w') as fout:
+             line = json.dumps(manifest_data)
+             fout.write(line + '\n')
+
+         # call transcribe, passing in manifest filepath
+         if duration < 40:
+             output_text = model.transcribe(manifest_filepath)[0]
+         else:  # do buffered inference
+             with torch.cuda.amp.autocast(dtype=amp_dtype):  # TODO: make it work if no cuda
+                 with torch.no_grad():
+                     hyps = get_buffered_pred_feat_multitaskAED(
+                         frame_asr,
+                         model.cfg.preprocessor,
+                         model_stride_in_secs,
+                         model.device,
+                         manifest=manifest_filepath,
+                         filepaths=None,
+                     )
+
+                     output_text = hyps[0].text
+
+     return output_text
+
+ with gr.Blocks(
+     title="NeMo Canary Model",
+     css="""
+     textarea { font-size: 18px;}
+     #model_output_text_box span {
+         font-size: 18px;
+         font-weight: bold;
+     }
+     """,
+     theme=gr.themes.Default(text_size=gr.themes.sizes.text_lg)  # make text slightly bigger (default is text_md)
+ ) as demo:
+
+     gr.HTML("<h1 style='text-align: center'>NeMo Canary model: Transcribe & Translate audio</h1>")
+
+     with gr.Row():
+         with gr.Column():
+             gr.HTML(
+                 "<p><b>Step 1:</b> Upload an audio file or record with your microphone.</p>"
+
+                 "<p style='color: #A0A0A0;'>This demo supports audio files up to 10 mins long. "
+                 "You can transcribe longer files locally with this NeMo "
+                 "<a href='https://github.com/NVIDIA/NeMo/blob/main/examples/asr/speech_multitask/speech_to_text_aed_chunked_infer.py'>script</a>.</p>"
+             )
+
+             audio_file = gr.Audio(sources=["microphone", "upload"], type="filepath")
+
+             gr.HTML("<p><b>Step 2:</b> Choose the input and output language.</p>")
+
+             src_lang = gr.Dropdown(
+                 choices=["English", "Spanish", "French", "German"],
+                 value="English",
+                 label="Input audio is spoken in:"
+             )
+
+         with gr.Column():
+             tgt_lang = gr.Dropdown(
+                 choices=["English", "Spanish", "French", "German"],
+                 value="English",
+                 label="Transcribe in language:"
+             )
+             pnc = gr.Checkbox(
+                 value=True,
+                 label="Punctuation & Capitalization in transcript?",
+             )
+
+         with gr.Column():
+
+             gr.HTML("<p><b>Step 3:</b> Run the model.</p>")
+
+             go_button = gr.Button(
+                 value="Run model",
+                 variant="primary",  # make "primary" so it stands out (default is "secondary")
+             )
+
+             model_output_text_box = gr.Textbox(
+                 label="Model Output",
+                 elem_id="model_output_text_box",
+             )
+
+     with gr.Row():
+
+         gr.HTML(
+             "<p style='text-align: center'>"
+             "🐤 <a href='https://huggingface.co/nvidia/canary-1b' target='_blank'>Canary model</a> | "
+             "🧑‍💻 <a href='https://github.com/NVIDIA/NeMo' target='_blank'>NeMo Repository</a>"
+             "</p>"
+         )
+
+     go_button.click(
+         fn=transcribe,
+         inputs=[audio_file, src_lang, tgt_lang, pnc],
+         outputs=[model_output_text_box]
+     )
+
+     # call on_src_or_tgt_lang_change whenever src_lang or tgt_lang dropdown menus are changed
+     src_lang.change(
+         fn=on_src_or_tgt_lang_change,
+         inputs=[src_lang, tgt_lang, pnc],
+         outputs=[src_lang, tgt_lang, pnc],
+     )
+     tgt_lang.change(
+         fn=on_src_or_tgt_lang_change,
+         inputs=[src_lang, tgt_lang, pnc],
+         outputs=[src_lang, tgt_lang, pnc],
+     )
+

+ demo.queue()
+ demo.launch(share=True)
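
Note on this revision: the two `.change()` handlers at the bottom of the new app.py reference `on_src_or_tgt_lang_change`, but no function with that name is defined anywhere in the 262-line file, so the script will raise a NameError when the dropdown wiring is executed. Below is a minimal sketch of a callback that satisfies the `inputs=[src_lang, tgt_lang, pnc]` / `outputs=[src_lang, tgt_lang, pnc]` signature used here; the language-pairing rules it enforces (Canary-1B transcribes en/es/fr/de and translates only between English and the other three) are an assumption based on the model card, not part of this commit.

# Hypothetical helper (not part of this commit): keep the two dropdowns consistent.
# Assumes Canary-1B's supported directions: same-language ASR for en/es/fr/de,
# plus translation between English and Spanish/French/German only.
def on_src_or_tgt_lang_change(src_lang_value, tgt_lang_value, pnc_value):
    if src_lang_value == "English":
        # English audio: transcribe, or translate to any of the other languages
        tgt_choices = ["English", "Spanish", "French", "German"]
    else:
        # non-English audio: transcribe in the same language, or translate to English
        tgt_choices = [src_lang_value, "English"]

    if tgt_lang_value not in tgt_choices:
        tgt_lang_value = src_lang_value

    # returning new components updates the ones listed in outputs=[...]
    src_lang = gr.Dropdown(
        choices=["English", "Spanish", "French", "German"],
        value=src_lang_value,
        label="Input audio is spoken in:",
    )
    tgt_lang = gr.Dropdown(
        choices=tgt_choices,
        value=tgt_lang_value,
        label="Transcribe in language:",
    )
    pnc = gr.Checkbox(value=pnc_value, label="Punctuation & Capitalization in transcript?")
    return src_lang, tgt_lang, pnc

Any such definition would need to be placed above the `with gr.Blocks(...)` block so the name is bound before the event handlers are registered.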