IRISLAB committed
Commit 0a9bde8 • 1 Parent(s): 7683fdb

Upload app.py

Files changed (1)
  1. app.py +358 -0
app.py ADDED
@@ -0,0 +1,358 @@
+ import gradio as gr
+ import os
+ import argparse
+
+ from modules.whisper_Inference import WhisperInference
+ from modules.faster_whisper_inference import FasterWhisperInference
+ from modules.nllb_inference import NLLBInference
+ from ui.htmls import *
+ from modules.youtube_manager import get_ytmetas
+ from modules.deepl_api import DeepLAPI
+ from modules.whisper_parameter import *
+
+
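+ # Web UI glue: wires the Whisper, NLLB, and DeepL inference modules into a tabbed Gradio app.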
+ class App:
+     def __init__(self, args):
+         self.args = args
+         self.app = gr.Blocks(css=CSS, theme=self.args.theme)
+         self.whisper_inf = self.init_whisper()
+         print(f"Use \"{self.args.whisper_type}\" implementation")
+         print(f"Device \"{self.whisper_inf.device}\" is detected")
+         self.nllb_inf = NLLBInference()
+         self.deepl_api = DeepLAPI()
+
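+     # Picks the Whisper backend from --whisper_type; unknown values fall back to faster-whisper.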
+     def init_whisper(self):
+         whisper_type = self.args.whisper_type.lower().strip()
+
+         if whisper_type in ["faster_whisper", "faster-whisper"]:
+             whisper_inf = FasterWhisperInference()
+             whisper_inf.model_dir = self.args.faster_whisper_model_dir
+         elif whisper_type in ["whisper"]:
+             whisper_inf = WhisperInference()
+             whisper_inf.model_dir = self.args.whisper_model_dir
+         else:
+             whisper_inf = FasterWhisperInference()
+             whisper_inf.model_dir = self.args.faster_whisper_model_dir
+         return whisper_inf
+
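+     # "start" is a Windows shell command, so opening the output folder only works on Windows.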
+     @staticmethod
+     def open_folder(folder_path: str):
+         if os.path.exists(folder_path):
+             os.system(f"start {folder_path}")
+         else:
+             print(f"The folder {folder_path} does not exist.")
+
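+     # Shows the "Translate to English?" checkbox only while a large model variant is selected.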
+     @staticmethod
+     def on_change_models(model_size: str):
+         translatable_model = ["large", "large-v1", "large-v2", "large-v3"]
+         if model_size not in translatable_model:
+             return gr.Checkbox(visible=False, value=False, interactive=False)
+         else:
+             return gr.Checkbox(visible=True, value=False, label="Translate to English?", interactive=True)
+
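+     # Builds the Blocks layout: File / YouTube / Mic transcription tabs and a T2T Translation tab.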
+     def launch(self):
+         with self.app:
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown(MARKDOWN, elem_id="md_project")
+             with gr.Tabs():
+                 with gr.TabItem("File"):  # tab1
+                     with gr.Row():
+                         input_file = gr.Files(type="filepath", label="Upload File here")
+                     with gr.Row():
+                         dd_model = gr.Dropdown(choices=self.whisper_inf.available_models, value="large-v2",
+                                                label="Model")
+                         dd_lang = gr.Dropdown(choices=["Automatic Detection"] + self.whisper_inf.available_langs,
+                                               value="Automatic Detection", label="Language")
+                         dd_file_format = gr.Dropdown(["SRT", "WebVTT", "txt"], value="SRT", label="File Format")
+                     with gr.Row():
+                         cb_translate = gr.Checkbox(value=False, label="Translate to English?", interactive=True)
+                     with gr.Row():
+                         cb_timestamp = gr.Checkbox(value=True, label="Add a timestamp to the end of the filename", interactive=True)
+                     with gr.Accordion("VAD Options", open=False, visible=isinstance(self.whisper_inf, FasterWhisperInference)):
+                         cb_vad_filter = gr.Checkbox(label="Enable Silero VAD Filter", value=False, interactive=True)
+                         sd_threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Speech Threshold", value=0.5)
+                         nb_min_speech_duration_ms = gr.Number(label="Minimum Speech Duration (ms)", precision=0, value=250)
+                         nb_max_speech_duration_s = gr.Number(label="Maximum Speech Duration (s)", value=9999)
+                         nb_min_silence_duration_ms = gr.Number(label="Minimum Silence Duration (ms)", precision=0, value=2000)
+                         nb_window_size_sample = gr.Number(label="Window Size (samples)", precision=0, value=1024)
+                         nb_speech_pad_ms = gr.Number(label="Speech Padding (ms)", precision=0, value=400)
+                     with gr.Accordion("Advanced Parameters", open=False):
+                         nb_beam_size = gr.Number(label="Beam Size", value=1, precision=0, interactive=True)
+                         nb_log_prob_threshold = gr.Number(label="Log Probability Threshold", value=-1.0, interactive=True)
+                         nb_no_speech_threshold = gr.Number(label="No Speech Threshold", value=0.6, interactive=True)
+                         dd_compute_type = gr.Dropdown(label="Compute Type", choices=self.whisper_inf.available_compute_types, value=self.whisper_inf.current_compute_type, interactive=True)
+                         nb_best_of = gr.Number(label="Best Of", value=5, interactive=True)
+                         nb_patience = gr.Number(label="Patience", value=1, interactive=True)
+                         cb_condition_on_previous_text = gr.Checkbox(label="Condition On Previous Text", value=True, interactive=True)
+                         tb_initial_prompt = gr.Textbox(label="Initial Prompt", value=None, interactive=True)
+                         sd_temperature = gr.Slider(label="Temperature", value=0, step=0.01, maximum=1.0, interactive=True)
+                         nb_compression_ratio_threshold = gr.Number(label="Compression Ratio Threshold", value=2.4, interactive=True)
+                     with gr.Row():
+                         btn_run = gr.Button("GENERATE SUBTITLE FILE", variant="primary")
+                     with gr.Row():
+                         tb_indicator = gr.Textbox(label="Output", scale=5)
+                         files_subtitles = gr.Files(label="Downloadable output file", scale=3, interactive=False)
+                         btn_openfolder = gr.Button('📂', scale=1)
+
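+                     # Gather the UI components into the ordered argument list transcribe_file() receives.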
+                     params = [input_file, dd_file_format, cb_timestamp]
+                     whisper_params = WhisperGradioComponents(model_size=dd_model,
+                                                              lang=dd_lang,
+                                                              is_translate=cb_translate,
+                                                              beam_size=nb_beam_size,
+                                                              log_prob_threshold=nb_log_prob_threshold,
+                                                              no_speech_threshold=nb_no_speech_threshold,
+                                                              compute_type=dd_compute_type,
+                                                              best_of=nb_best_of,
+                                                              patience=nb_patience,
+                                                              condition_on_previous_text=cb_condition_on_previous_text,
+                                                              initial_prompt=tb_initial_prompt,
+                                                              temperature=sd_temperature,
+                                                              compression_ratio_threshold=nb_compression_ratio_threshold,
+                                                              vad_filter=cb_vad_filter,
+                                                              threshold=sd_threshold,
+                                                              min_speech_duration_ms=nb_min_speech_duration_ms,
+                                                              max_speech_duration_s=nb_max_speech_duration_s,
+                                                              min_silence_duration_ms=nb_min_silence_duration_ms,
+                                                              window_size_sample=nb_window_size_sample,
+                                                              speech_pad_ms=nb_speech_pad_ms)
+
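+                     # Transcribe on click; the textbox reports status and gr.Files serves the subtitle files.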
+                     btn_run.click(fn=self.whisper_inf.transcribe_file,
+                                   inputs=params + whisper_params.to_list(),
+                                   outputs=[tb_indicator, files_subtitles])
+                     btn_openfolder.click(fn=lambda: self.open_folder("outputs"), inputs=None, outputs=None)
+                     dd_model.change(fn=self.on_change_models, inputs=[dd_model], outputs=[cb_translate])
+
+                 with gr.TabItem("YouTube"):  # tab2
+                     with gr.Row():
+                         tb_youtubelink = gr.Textbox(label="YouTube Link")
+                     with gr.Row(equal_height=True):
+                         with gr.Column():
+                             img_thumbnail = gr.Image(label="YouTube Thumbnail")
+                         with gr.Column():
+                             tb_title = gr.Label(label="YouTube Title")
+                             tb_description = gr.Textbox(label="YouTube Description", max_lines=15)
+                     with gr.Row():
+                         dd_model = gr.Dropdown(choices=self.whisper_inf.available_models, value="large-v2",
+                                                label="Model")
+                         dd_lang = gr.Dropdown(choices=["Automatic Detection"] + self.whisper_inf.available_langs,
+                                               value="Automatic Detection", label="Language")
+                         dd_file_format = gr.Dropdown(choices=["SRT", "WebVTT", "txt"], value="SRT", label="File Format")
+                     with gr.Row():
+                         cb_translate = gr.Checkbox(value=False, label="Translate to English?", interactive=True)
+                     with gr.Row():
+                         cb_timestamp = gr.Checkbox(value=True, label="Add a timestamp to the end of the filename",
+                                                    interactive=True)
+                     with gr.Accordion("VAD Options", open=False, visible=isinstance(self.whisper_inf, FasterWhisperInference)):
+                         cb_vad_filter = gr.Checkbox(label="Enable Silero VAD Filter", value=False, interactive=True)
+                         sd_threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Speech Threshold", value=0.5)
+                         nb_min_speech_duration_ms = gr.Number(label="Minimum Speech Duration (ms)", precision=0, value=250)
+                         nb_max_speech_duration_s = gr.Number(label="Maximum Speech Duration (s)", value=9999)
+                         nb_min_silence_duration_ms = gr.Number(label="Minimum Silence Duration (ms)", precision=0, value=2000)
+                         nb_window_size_sample = gr.Number(label="Window Size (samples)", precision=0, value=1024)
+                         nb_speech_pad_ms = gr.Number(label="Speech Padding (ms)", precision=0, value=400)
+                     with gr.Accordion("Advanced Parameters", open=False):
+                         nb_beam_size = gr.Number(label="Beam Size", value=1, precision=0, interactive=True)
+                         nb_log_prob_threshold = gr.Number(label="Log Probability Threshold", value=-1.0, interactive=True)
+                         nb_no_speech_threshold = gr.Number(label="No Speech Threshold", value=0.6, interactive=True)
+                         dd_compute_type = gr.Dropdown(label="Compute Type", choices=self.whisper_inf.available_compute_types, value=self.whisper_inf.current_compute_type, interactive=True)
+                         nb_best_of = gr.Number(label="Best Of", value=5, interactive=True)
+                         nb_patience = gr.Number(label="Patience", value=1, interactive=True)
+                         cb_condition_on_previous_text = gr.Checkbox(label="Condition On Previous Text", value=True, interactive=True)
+                         tb_initial_prompt = gr.Textbox(label="Initial Prompt", value=None, interactive=True)
+                         sd_temperature = gr.Slider(label="Temperature", value=0, step=0.01, maximum=1.0, interactive=True)
+                         nb_compression_ratio_threshold = gr.Number(label="Compression Ratio Threshold", value=2.4, interactive=True)
+                     with gr.Row():
+                         btn_run = gr.Button("GENERATE SUBTITLE FILE", variant="primary")
+                     with gr.Row():
+                         tb_indicator = gr.Textbox(label="Output", scale=5)
+                         files_subtitles = gr.Files(label="Downloadable output file", scale=3)
+                         btn_openfolder = gr.Button('📂', scale=1)
+
+                     params = [tb_youtubelink, dd_file_format, cb_timestamp]
+                     whisper_params = WhisperGradioComponents(model_size=dd_model,
+                                                              lang=dd_lang,
+                                                              is_translate=cb_translate,
+                                                              beam_size=nb_beam_size,
+                                                              log_prob_threshold=nb_log_prob_threshold,
+                                                              no_speech_threshold=nb_no_speech_threshold,
+                                                              compute_type=dd_compute_type,
+                                                              best_of=nb_best_of,
+                                                              patience=nb_patience,
+                                                              condition_on_previous_text=cb_condition_on_previous_text,
+                                                              initial_prompt=tb_initial_prompt,
+                                                              temperature=sd_temperature,
+                                                              compression_ratio_threshold=nb_compression_ratio_threshold,
+                                                              vad_filter=cb_vad_filter,
+                                                              threshold=sd_threshold,
+                                                              min_speech_duration_ms=nb_min_speech_duration_ms,
+                                                              max_speech_duration_s=nb_max_speech_duration_s,
+                                                              min_silence_duration_ms=nb_min_silence_duration_ms,
+                                                              window_size_sample=nb_window_size_sample,
+                                                              speech_pad_ms=nb_speech_pad_ms)
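+                     # Refresh thumbnail, title, and description whenever the link changes; transcribe on click.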
+                     btn_run.click(fn=self.whisper_inf.transcribe_youtube,
+                                   inputs=params + whisper_params.to_list(),
+                                   outputs=[tb_indicator, files_subtitles])
+                     tb_youtubelink.change(get_ytmetas, inputs=[tb_youtubelink],
+                                           outputs=[img_thumbnail, tb_title, tb_description])
+                     btn_openfolder.click(fn=lambda: self.open_folder("outputs"), inputs=None, outputs=None)
+                     dd_model.change(fn=self.on_change_models, inputs=[dd_model], outputs=[cb_translate])
+
+                 with gr.TabItem("Mic"):  # tab3
+                     with gr.Row():
+                         mic_input = gr.Microphone(label="Record with Mic", type="filepath", interactive=True)
+                     with gr.Row():
+                         dd_model = gr.Dropdown(choices=self.whisper_inf.available_models, value="large-v2",
+                                                label="Model")
+                         dd_lang = gr.Dropdown(choices=["Automatic Detection"] + self.whisper_inf.available_langs,
+                                               value="Automatic Detection", label="Language")
+                         dd_file_format = gr.Dropdown(["SRT", "WebVTT", "txt"], value="SRT", label="File Format")
+                     with gr.Row():
+                         cb_translate = gr.Checkbox(value=False, label="Translate to English?", interactive=True)
+                     with gr.Accordion("VAD Options", open=False, visible=isinstance(self.whisper_inf, FasterWhisperInference)):
+                         cb_vad_filter = gr.Checkbox(label="Enable Silero VAD Filter", value=False, interactive=True)
+                         sd_threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Speech Threshold", value=0.5)
+                         nb_min_speech_duration_ms = gr.Number(label="Minimum Speech Duration (ms)", precision=0, value=250)
+                         nb_max_speech_duration_s = gr.Number(label="Maximum Speech Duration (s)", value=9999)
+                         nb_min_silence_duration_ms = gr.Number(label="Minimum Silence Duration (ms)", precision=0, value=2000)
+                         nb_window_size_sample = gr.Number(label="Window Size (samples)", precision=0, value=1024)
+                         nb_speech_pad_ms = gr.Number(label="Speech Padding (ms)", precision=0, value=400)
+                     with gr.Accordion("Advanced Parameters", open=False):
+                         nb_beam_size = gr.Number(label="Beam Size", value=1, precision=0, interactive=True)
+                         nb_log_prob_threshold = gr.Number(label="Log Probability Threshold", value=-1.0, interactive=True)
+                         nb_no_speech_threshold = gr.Number(label="No Speech Threshold", value=0.6, interactive=True)
+                         dd_compute_type = gr.Dropdown(label="Compute Type", choices=self.whisper_inf.available_compute_types, value=self.whisper_inf.current_compute_type, interactive=True)
+                         nb_best_of = gr.Number(label="Best Of", value=5, interactive=True)
+                         nb_patience = gr.Number(label="Patience", value=1, interactive=True)
+                         cb_condition_on_previous_text = gr.Checkbox(label="Condition On Previous Text", value=True, interactive=True)
+                         tb_initial_prompt = gr.Textbox(label="Initial Prompt", value=None, interactive=True)
+                         sd_temperature = gr.Slider(label="Temperature", value=0, step=0.01, maximum=1.0, interactive=True)
+                         nb_compression_ratio_threshold = gr.Number(label="Compression Ratio Threshold", value=2.4, interactive=True)
+                     with gr.Row():
+                         btn_run = gr.Button("GENERATE SUBTITLE FILE", variant="primary")
+                     with gr.Row():
+                         tb_indicator = gr.Textbox(label="Output", scale=5)
+                         files_subtitles = gr.Files(label="Downloadable output file", scale=3)
+                         btn_openfolder = gr.Button('📂', scale=1)
+
+                     params = [mic_input, dd_file_format]
+                     whisper_params = WhisperGradioComponents(model_size=dd_model,
+                                                              lang=dd_lang,
+                                                              is_translate=cb_translate,
+                                                              beam_size=nb_beam_size,
+                                                              log_prob_threshold=nb_log_prob_threshold,
+                                                              no_speech_threshold=nb_no_speech_threshold,
+                                                              compute_type=dd_compute_type,
+                                                              best_of=nb_best_of,
+                                                              patience=nb_patience,
+                                                              condition_on_previous_text=cb_condition_on_previous_text,
+                                                              initial_prompt=tb_initial_prompt,
+                                                              temperature=sd_temperature,
+                                                              compression_ratio_threshold=nb_compression_ratio_threshold,
+                                                              vad_filter=cb_vad_filter,
+                                                              threshold=sd_threshold,
+                                                              min_speech_duration_ms=nb_min_speech_duration_ms,
+                                                              max_speech_duration_s=nb_max_speech_duration_s,
+                                                              min_silence_duration_ms=nb_min_silence_duration_ms,
+                                                              window_size_sample=nb_window_size_sample,
+                                                              speech_pad_ms=nb_speech_pad_ms)
+                     btn_run.click(fn=self.whisper_inf.transcribe_mic,
+                                   inputs=params + whisper_params.to_list(),
+                                   outputs=[tb_indicator, files_subtitles])
+                     btn_openfolder.click(fn=lambda: self.open_folder("outputs"), inputs=None, outputs=None)
+                     dd_model.change(fn=self.on_change_models, inputs=[dd_model], outputs=[cb_translate])
+
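+                 # Subtitle-to-subtitle translation, either through the DeepL web API or a local NLLB model.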
+                 with gr.TabItem("T2T Translation"):  # tab 4
+                     with gr.Row():
+                         file_subs = gr.Files(type="filepath", label="Upload Subtitle Files to translate here",
+                                              file_types=['.vtt', '.srt'])
+
+                     with gr.TabItem("DeepL API"):  # sub tab1
+                         with gr.Row():
+                             tb_authkey = gr.Textbox(label="Your Auth Key (API KEY)", value="")
+                         with gr.Row():
+                             dd_deepl_sourcelang = gr.Dropdown(label="Source Language", value="Automatic Detection",
+                                                               choices=list(self.deepl_api.available_source_langs.keys()))
+                             dd_deepl_targetlang = gr.Dropdown(label="Target Language", value="English",
+                                                               choices=list(self.deepl_api.available_target_langs.keys()))
+                         with gr.Row():
+                             cb_deepl_ispro = gr.Checkbox(label="Pro User?", value=False)
+                         with gr.Row():
+                             btn_run = gr.Button("TRANSLATE SUBTITLE FILE", variant="primary")
+                         with gr.Row():
+                             tb_indicator = gr.Textbox(label="Output", scale=5)
+                             files_subtitles = gr.Files(label="Downloadable output file", scale=3)
+                             btn_openfolder = gr.Button('📂', scale=1)
+
+                         btn_run.click(fn=self.deepl_api.translate_deepl,
+                                       inputs=[tb_authkey, file_subs, dd_deepl_sourcelang, dd_deepl_targetlang,
+                                               cb_deepl_ispro],
+                                       outputs=[tb_indicator, files_subtitles])
+
+                         btn_openfolder.click(fn=lambda: self.open_folder(os.path.join("outputs", "translations")),
+                                              inputs=None,
+                                              outputs=None)
+
+                     with gr.TabItem("NLLB"):  # sub tab2
+                         with gr.Row():
+                             dd_nllb_model = gr.Dropdown(label="Model", value="facebook/nllb-200-1.3B",
+                                                         choices=self.nllb_inf.available_models)
+                             dd_nllb_sourcelang = gr.Dropdown(label="Source Language",
+                                                              choices=self.nllb_inf.available_source_langs)
+                             dd_nllb_targetlang = gr.Dropdown(label="Target Language",
+                                                              choices=self.nllb_inf.available_target_langs)
+                         with gr.Row():
+                             cb_timestamp = gr.Checkbox(value=True, label="Add a timestamp to the end of the filename",
+                                                        interactive=True)
+                         with gr.Row():
+                             btn_run = gr.Button("TRANSLATE SUBTITLE FILE", variant="primary")
+                         with gr.Row():
+                             tb_indicator = gr.Textbox(label="Output", scale=5)
+                             files_subtitles = gr.Files(label="Downloadable output file", scale=3)
+                             btn_openfolder = gr.Button('📂', scale=1)
+                         with gr.Column():
+                             md_vram_table = gr.HTML(NLLB_VRAM_TABLE, elem_id="md_nllb_vram_table")
+
+                         btn_run.click(fn=self.nllb_inf.translate_file,
+                                       inputs=[file_subs, dd_nllb_model, dd_nllb_sourcelang, dd_nllb_targetlang, cb_timestamp],
+                                       outputs=[tb_indicator, files_subtitles])
+
+                         btn_openfolder.click(fn=lambda: self.open_folder(os.path.join("outputs", "translations")),
+                                              inputs=None,
+                                              outputs=None)
+
+         # Launch the app with optional Gradio settings
+         launch_args = {}
+         if self.args.share:
+             launch_args['share'] = self.args.share
+         if self.args.server_name:
+             launch_args['server_name'] = self.args.server_name
+         if self.args.server_port:
+             launch_args['server_port'] = self.args.server_port
+         if self.args.username and self.args.password:
+             launch_args['auth'] = (self.args.username, self.args.password)
+         launch_args['inbrowser'] = True
+
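+         # Enable the request queue (long transcriptions run through it) and start the server;
+         # --api_open controls whether the Gradio API is exposed.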
+         self.app.queue(api_open=self.args.api_open).launch(**launch_args)
+
+
+ # Create the parser for command-line arguments
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--whisper_type', type=str, default="faster-whisper", help='Whisper implementation to use: "whisper" or "faster-whisper"')
+ parser.add_argument('--share', type=bool, default=False, nargs='?', const=True, help='Gradio share value')
+ parser.add_argument('--server_name', type=str, default=None, help='Gradio server host')
+ parser.add_argument('--server_port', type=int, default=None, help='Gradio server port')
+ parser.add_argument('--username', type=str, default=None, help='Gradio authentication username')
+ parser.add_argument('--password', type=str, default=None, help='Gradio authentication password')
+ parser.add_argument('--theme', type=str, default=None, help='Gradio Blocks theme')
+ parser.add_argument('--colab', type=bool, default=False, nargs='?', const=True, help='Whether the app is running in Google Colab')
+ parser.add_argument('--api_open', type=bool, default=False, nargs='?', const=True, help='Whether to open the Gradio API')
+ parser.add_argument('--whisper_model_dir', type=str, default=os.path.join("models", "Whisper"), help='Directory path of the whisper model')
+ parser.add_argument('--faster_whisper_model_dir', type=str, default=os.path.join("models", "Whisper", "faster-whisper"), help='Directory path of the faster-whisper model')
+ _args = parser.parse_args()
+
+ if __name__ == "__main__":
+     app = App(args=_args)
+     app.launch()