Spaces:
Sleeping
Sleeping
bofenghuang
committed on
Commit
·
ecd55e0
1
Parent(s):
392be49
add timestamp option to yt
Browse files
run_demo_low_api_openai.py
CHANGED
@@ -180,7 +180,7 @@ def yt_transcribe(yt_url, with_timestamps, model_name=DEFAULT_MODEL_NAME):
|
|
180 |
return text
|
181 |
|
182 |
|
183 |
-
def video_transcribe(video_file_path, model_name=DEFAULT_MODEL_NAME):
|
184 |
if video_file_path is None:
|
185 |
raise ValueError("Failed to transcribe video as no video_file_path has been defined")
|
186 |
|
@@ -189,7 +189,7 @@ def video_transcribe(video_file_path, model_name=DEFAULT_MODEL_NAME):
|
|
189 |
|
190 |
model = maybe_load_cached_pipeline(model_name)
|
191 |
# text = model.transcribe("audio.mp3", **GEN_KWARGS)["text"]
|
192 |
-
text = infer(model, audio_file_path, with_timestamps
|
193 |
|
194 |
logger.info(f'Transcription by `{model_name}`:\n{text.to_json(orient="index", force_ascii=False, indent=2)}\n')
|
195 |
|
@@ -287,6 +287,7 @@ with gr.Blocks() as demo:
|
|
287 |
downloaded_video_output = gr.Video(label="Video file", mirror_webcam=False)
|
288 |
download_youtube_btn.click(download_video_from_youtube, inputs=[yt_link_input], outputs=[downloaded_video_output])
|
289 |
|
|
|
290 |
video_transcribe_btn = gr.Button("Transcribe video")
|
291 |
text_output_df = gr.DataFrame(
|
292 |
value=default_text_output_df,
|
@@ -297,7 +298,7 @@ with gr.Blocks() as demo:
|
|
297 |
overflow_row_behaviour="paginate",
|
298 |
)
|
299 |
|
300 |
-
video_transcribe_btn.click(video_transcribe, inputs=[downloaded_video_output], outputs=[text_output_df])
|
301 |
|
302 |
# demo.launch(server_name="0.0.0.0", debug=True)
|
303 |
# demo.launch(server_name="0.0.0.0", debug=True, share=True)
|
|
|
180 |
return text
|
181 |
|
182 |
|
183 |
+
def video_transcribe(video_file_path, with_timestamps, model_name=DEFAULT_MODEL_NAME):
|
184 |
if video_file_path is None:
|
185 |
raise ValueError("Failed to transcribe video as no video_file_path has been defined")
|
186 |
|
|
|
189 |
|
190 |
model = maybe_load_cached_pipeline(model_name)
|
191 |
# text = model.transcribe("audio.mp3", **GEN_KWARGS)["text"]
|
192 |
+
text = infer(model, audio_file_path, with_timestamps, return_df=True)
|
193 |
|
194 |
logger.info(f'Transcription by `{model_name}`:\n{text.to_json(orient="index", force_ascii=False, indent=2)}\n')
|
195 |
|
|
|
287 |
downloaded_video_output = gr.Video(label="Video file", mirror_webcam=False)
|
288 |
download_youtube_btn.click(download_video_from_youtube, inputs=[yt_link_input], outputs=[downloaded_video_output])
|
289 |
|
290 |
+
with_timestamps_input3 = gr.Checkbox(label="With timestamps?", value=True)
|
291 |
video_transcribe_btn = gr.Button("Transcribe video")
|
292 |
text_output_df = gr.DataFrame(
|
293 |
value=default_text_output_df,
|
|
|
298 |
overflow_row_behaviour="paginate",
|
299 |
)
|
300 |
|
301 |
+
video_transcribe_btn.click(video_transcribe, inputs=[downloaded_video_output, with_timestamps_input3], outputs=[text_output_df])
|
302 |
|
303 |
# demo.launch(server_name="0.0.0.0", debug=True)
|
304 |
# demo.launch(server_name="0.0.0.0", debug=True, share=True)
|