jiuuee committed on
Commit 542e22d
1 Parent(s): 8b95a5e

Update app.py

Files changed (1): app.py +1 -242
app.py CHANGED
@@ -14,249 +14,8 @@ def transcribe(audio):
 
     return transcription[0]
 
-audio_input = gr.components.Audio()
+audio_input = gr.inputs.Audio()
 
 iface = gr.Interface(transcribe, audio_input, "text", title="ASR with NeMo Canary Model")
 iface.launch()
 
-'''
-import gradio as gr
-from transformers import pipeline
-# Load pipelines for Canary ASR, LLama3 QA, and VITS TTS
-asr_pipeline = pipeline("automatic-speech-recognition", model="nvidia/canary-1b", device=0)
-qa_pipeline = pipeline("question-answering", model="LLAMA/llama3-base-qa", tokenizer="LLAMA/llama3-base-qa")
-tts_pipeline = pipeline("text-to-speech", model="patrickvonplaten/vits-large", device=0)
-
-
-import gradio as gr
-import json
-import librosa
-import os
-import soundfile as sf
-import tempfile
-import uuid
-from transformers import pipeline
-
-import torch
-
-from nemo.collections.asr.models import ASRModel
-from nemo.collections.asr.parts.utils.streaming_utils import FrameBatchMultiTaskAED
-from nemo.collections.asr.parts.utils.transcribe_utils import get_buffered_pred_feat_multitaskAED
-
-SAMPLE_RATE = 16000 # Hz
-MAX_AUDIO_SECS = 30 # wont try to transcribe if longer than this
-src_lang = "en"
-tgt_lang = "en"
-pnc="no"
-
-model = ASRModel.from_pretrained("nvidia/canary-1b")
-model.eval()
-
-# make sure beam size always 1 for consistency
-model.change_decoding_strategy(None)
-decoding_cfg = model.cfg.decoding
-decoding_cfg.beam.beam_size = 1
-model.change_decoding_strategy(decoding_cfg)
-
-# setup for buffered inference
-model.cfg.preprocessor.dither = 0.0
-model.cfg.preprocessor.pad_to = 0
-
-feature_stride = model.cfg.preprocessor['window_stride']
-model_stride_in_secs = feature_stride * 8 # 8 = model stride, which is 8 for FastConformer
-
-frame_asr = FrameBatchMultiTaskAED(
-    asr_model=model,
-    frame_len=40.0,
-    total_buffer=40.0,
-    batch_size=16,
-)
-
-amp_dtype = torch.float16
-
-
-def convert_audio(audio_filepath, tmpdir, utt_id):
-    """
-    Convert all files to monochannel 16 kHz wav files.
-    Do not convert and raise error if audio too long.
-    Returns output filename and duration.
-    """
-    data, sr = librosa.load(audio_filepath, sr=None, mono=True)
-
-    duration = librosa.get_duration(y=data, sr=sr)
-
-    if duration > MAX_AUDIO_SECS:
-        raise gr.Error(
-            f"This demo can transcribe up to {MAX_AUDIO_MINUTES} minutes of audio. "
-            "If you wish, you may trim the audio using the Audio viewer in Step 1 "
-            "(click on the scissors icon to start trimming audio)."
-        )
-
-    if sr != SAMPLE_RATE:
-        data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)
-
-    out_filename = os.path.join(tmpdir, utt_id + '.wav')
-
-    # save output audio
-    sf.write(out_filename, data, SAMPLE_RATE)
-
-    return out_filename, duration
-
-
-def transcribe(audio_filepath, src_lang, tgt_lang, pnc):
-
-    if audio_filepath is None:
-        raise gr.Error("Please provide some input audio: either upload an audio file or use the microphone")
-
-    utt_id = uuid.uuid4()
-    with tempfile.TemporaryDirectory() as tmpdir:
-        converted_audio_filepath, duration = convert_audio(audio_filepath, tmpdir, str(utt_id))
-
-        # make manifest file and save
-        manifest_data = {
-            "audio_filepath": converted_audio_filepath,
-            "source_lang": src_lang,
-            "target_lang": tgt_lang,
-            "taskname": taskname,
-            "pnc": pnc,
-            "answer": "predict",
-            "duration": str(duration),
-        }
-
-        manifest_filepath = os.path.join(tmpdir, f'{utt_id}.json')
-
-        with open(manifest_filepath, 'w') as fout:
-            line = json.dumps(manifest_data)
-            fout.write(line + '\n')
-
-        # call transcribe, passing in manifest filepath
-        if duration < 40:
-            output_text = model.transcribe(manifest_filepath)[0]
-        else: # do buffered inference
-            with torch.cuda.amp.autocast(dtype=amp_dtype): # TODO: make it work if no cuda
-                with torch.no_grad():
-                    hyps = get_buffered_pred_feat_multitaskAED(
-                        frame_asr,
-                        model.cfg.preprocessor,
-                        model_stride_in_secs,
-                        model.device,
-                        manifest=manifest_filepath,
-                        filepaths=None,
-                    )
-
-                    output_text = hyps[0].text
-
-    return output_text
-
-
-
-with gr.Blocks(
-    title="NeMo Canary Model",
-    css="""
-    textarea { font-size: 18px;}
-    #model_output_text_box span {
-        font-size: 18px;
-        font-weight: bold;
-    }
-    """,
-    theme=gr.themes.Default(text_size=gr.themes.sizes.text_lg) # make text slightly bigger (default is text_md )
-) as demo:
-
-    gr.HTML("<h1 style='text-align: center'>NeMo Canary model: Transcribe & Translate audio</h1>")
-
-    with gr.Row():
-        with gr.Column():
-            gr.HTML(
-                "<p><b>Step 1:</b> Record with your microphone.</p>"
-
-
-            )
-
-            audio_file = gr.Audio(sources=["microphone"], type="filepath")
-
-
-        with gr.Column():
-
-            gr.HTML("<p><b>Step 3:</b> Run the model.</p>")
-
-            go_button = gr.Button(
-                value="Run model",
-                variant="primary", # make "primary" so it stands out (default is "secondary")
-            )
-
-            model_output_text_box = gr.Textbox(
-                label="Model Output",
-                elem_id="model_output_text_box",
-            )
-
-    with gr.Row():
-
-        gr.HTML(
-            "<p style='text-align: center'>"
-            "🐤 <a href='https://huggingface.co/nvidia/canary-1b' target='_blank'>Canary model</a> | "
-            "🧑‍💻 <a href='https://github.com/NVIDIA/NeMo' target='_blank'>NeMo Repository</a>"
-            "</p>"
-        )
-
-    go_button.click(
-        fn=transcribe,
-        inputs = [audio_file],
-        outputs = [model_output_text_box]
-    )
-
-
-demo.queue()
-demo.launch()
-
-
-
-
-# Function to capture audio using Canary ASR
-def capture_audio():
-    utt_id = uuid.uuid4()
-    with tempfile.TemporaryDirectory() as tmpdir:
-        converted_audio_filepath, duration = convert_audio(audio_filepath, tmpdir, str(utt_id))
-
-        manifest_data = {
-            "audio_filepath": converted_audio_filepath,
-            "source_lang": "en",
-            "target_lang": "en",
-            "taskname": taskname,
-            "pnc": pnc,
-            "answer": "predict",
-            "duration": 10,
-        }
-
-        manifest_filepath = os.path.join(tmpdir, f'{utt_id}.json')
-
-    print("Listening for cue words...")
-    while True:
-        audio_input = asr_pipeline(None)[0]['input_values']
-        transcript = asr_pipeline(audio_input)[0]['transcription']
-        if "hey canary" in transcript.lower():
-            print("Cue word detected!")
-            break
-        print("Listening...")
-    return audio_input
-
-# AI assistant function
-def ai_assistant(audio_input):
-    # Perform automatic speech recognition (ASR)
-    transcript = asr_pipeline(audio_input)[0]['transcription']
-
-    # Perform question answering (QA)
-    qa_result = qa_pipeline(question=transcript, context="Insert your context here")
-
-    # Convert the QA result to speech using text-to-speech (TTS)
-    tts_output = tts_pipeline(qa_result['answer'])
-
-    return tts_output[0]['audio']
-
-if __name__ == "__main__":
-    # Create a Gradio interface
-    gr.Interface(ai_assistant,
-        inputs=gr.inputs.Audio(capture=capture_audio, label="Speak Here"),
-        outputs=gr.outputs.Audio(type="audio", label="Assistant's Response"),
-        title="AI Assistant",
-        description="An AI Assistant that answers questions based on your speech input.").launch()
-'''
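For reference, here is a minimal sketch of the kind of app this hunk leaves behind. It is an assumption, not the repository's actual app.py: lines 1-13 of the file (the imports, the model load, and the body of transcribe) are not shown in this diff, and the sketch uses the current gr.Audio component rather than the legacy gr.inputs.Audio namespace the commit switches to.

```python
# Hypothetical sketch; the unshown lines 1-13 of app.py are assumed, not copied.
import gradio as gr
from nemo.collections.asr.models import ASRModel

# Load the Canary model once at startup (assumed to happen in the unshown lines).
model = ASRModel.from_pretrained("nvidia/canary-1b")
model.eval()


def transcribe(audio):
    # With type="filepath", Gradio passes the callback a path to a temporary audio file.
    transcription = model.transcribe([audio])
    return transcription[0]


# The commit changes this line to the legacy gr.inputs.Audio(); that namespace was
# removed in Gradio 4.x, so the current gr.Audio component is used here instead.
audio_input = gr.Audio(sources=["microphone", "upload"], type="filepath")

iface = gr.Interface(transcribe, audio_input, "text", title="ASR with NeMo Canary Model")
iface.launch()
```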