The app.py interface has been re-laid out using Blocks
- README.md +4 -4
- app.py +69 -43
- requirements-fasterWhisper.txt +1 -1
- requirements-whisper.txt +1 -1
- requirements.txt +1 -1
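For orientation, the core of this commit is replacing the two `gr.Interface` definitions with explicit `gr.Blocks` layouts whose Submit buttons call the transcription functions directly. Below is a minimal sketch of that pattern, assuming Gradio 3.36; the `transcribe` function and component names are illustrative placeholders, not the app's actual ones.

```python
import gradio as gr

def transcribe(audio_path):
    # Placeholder for the real transcription call.
    text = "transcribed text"
    out_path = "transcript.txt"
    with open(out_path, "w") as f:
        f.write(text)
    return out_path, text, "[segments]"

with gr.Blocks() as simple_tab:
    gr.Markdown("Transcribe audio with Whisper")
    with gr.Row():
        with gr.Column():
            submit = gr.Button("Submit", variant="primary")
            audio_in = gr.Audio(source="upload", type="filepath", label="Audio")
        with gr.Column():
            outputs = [gr.File(label="Download"),
                       gr.Text(label="Transcription"),
                       gr.Text(label="Segments")]
    # The explicit click handler replaces gr.Interface's implicit submit wiring.
    submit.click(fn=transcribe, inputs=[audio_in], outputs=outputs)

demo = gr.TabbedInterface([simple_tab], tab_names=["Simple"])
demo.launch()
```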
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title: Whisper Webui
-emoji:
-colorFrom:
+title: Faster Whisper Webui
+emoji: ✨
+colorFrom: blue
 colorTo: purple
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.36.0
 app_file: app.py
 pinned: false
 license: apache-2.0
app.py CHANGED
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime
 import json
 import math
 from typing import Iterator, Union
@@ -526,52 +526,78 @@ def create_ui(app_config: ApplicationConfig):
         gr.Checkbox(label="Word Timestamps - Highlight Words", value=app_config.highlight_words),
     ]

-    simple_transcribe = gr.Interface(fn=ui.transcribe_webui_simple_progress if is_queue_mode else ui.transcribe_webui_simple,
-        description=ui_description, article=ui_article, inputs=[
-            *common_inputs(),
-            *common_vad_inputs(),
-            *common_word_timestamps_inputs(),
-        ], outputs=[
-            gr.File(label="Download"),
-            gr.Text(label="Transcription"),
-            gr.Text(label="Segments")
-        ])
-
-    full_transcribe = gr.Interface(fn=ui.transcribe_webui_full_progress if is_queue_mode else ui.transcribe_webui_full,
-        description=full_description, article=ui_article, inputs=[
-            *common_inputs(),
-            ...
+    common_output = lambda : [
+        gr.File(label="Download"),
+        gr.Text(label="Transcription"),
+        gr.Text(label="Segments"),
+    ]
+
+    is_queue_mode = app_config.queue_concurrency_count is not None and app_config.queue_concurrency_count > 0
+
+    simple_callback = gr.CSVLogger()
+
+    with gr.Blocks() as simple_transcribe:
+        gr.Markdown(ui_description)
+        with gr.Row():
+            with gr.Column():
+                simple_submit = gr.Button("Submit", variant="primary")
+                simple_input = common_inputs() + common_vad_inputs() + common_word_timestamps_inputs()
+            with gr.Column():
+                simple_output = common_output()
+                simple_flag = gr.Button("Flag")
+        gr.Markdown(ui_article)
+
+        # This needs to be called at some point prior to the first call to callback.flag()
+        simple_callback.setup(simple_input + simple_output, "flagged")
+
+        simple_submit.click(fn=ui.transcribe_webui_simple_progress if is_queue_mode else ui.transcribe_webui_simple,
+                            inputs=simple_input, outputs=simple_output)
+        # We can choose which components to flag -- in this case, we'll flag all of them
+        simple_flag.click(lambda *args: print("simple_callback.flag...") or simple_callback.flag(args), simple_input + simple_output, None, preprocess=False)
+
+    full_description = ui_description + "\n\n\n\n" + "Be careful when changing some of the options in the full interface - this can cause the model to crash."
+
+    full_callback = gr.CSVLogger()
+
+    with gr.Blocks() as full_transcribe:
+        gr.Markdown(full_description)
+        with gr.Row():
+            with gr.Column():
+                full_submit = gr.Button("Submit", variant="primary")
+                full_input1 = common_inputs() + common_vad_inputs() + [
+                    gr.Number(label="VAD - Padding (s)", precision=None, value=app_config.vad_padding),
+                    gr.Number(label="VAD - Prompt Window (s)", precision=None, value=app_config.vad_prompt_window),
+                    gr.Dropdown(choices=VAD_INITIAL_PROMPT_MODE_VALUES, label="VAD - Initial Prompt Mode")]
+
+                full_input2 = common_word_timestamps_inputs() + [
+                    gr.Text(label="Word Timestamps - Prepend Punctuations", value=app_config.prepend_punctuations),
+                    gr.Text(label="Word Timestamps - Append Punctuations", value=app_config.append_punctuations),
+                    gr.TextArea(label="Initial Prompt"),
+                    gr.Number(label="Temperature", value=app_config.temperature),
+                    gr.Number(label="Best Of - Non-zero temperature", value=app_config.best_of, precision=0),
+                    gr.Number(label="Beam Size - Zero temperature", value=app_config.beam_size, precision=0),
+                    gr.Number(label="Patience - Zero temperature", value=app_config.patience),
+                    gr.Number(label="Length Penalty - Any temperature", value=app_config.length_penalty),
+                    gr.Text(label="Suppress Tokens - Comma-separated list of token IDs", value=app_config.suppress_tokens),
+                    gr.Checkbox(label="Condition on previous text", value=app_config.condition_on_previous_text),
+                    gr.Checkbox(label="FP16", value=app_config.fp16),
+                    gr.Number(label="Temperature increment on fallback", value=app_config.temperature_increment_on_fallback),
+                    gr.Number(label="Compression ratio threshold", value=app_config.compression_ratio_threshold),
+                    gr.Number(label="Logprob threshold", value=app_config.logprob_threshold),
+                    gr.Number(label="No speech threshold", value=app_config.no_speech_threshold)]
+
+            with gr.Column():
+                full_output = common_output()
+                full_flag = gr.Button("Flag")
+        gr.Markdown(ui_article)
+
+        # This needs to be called at some point prior to the first call to callback.flag()
+        full_callback.setup(full_input1 + full_input2 + full_output, "flagged")
+
+        full_submit.click(fn=ui.transcribe_webui_full_progress if is_queue_mode else ui.transcribe_webui_full,
+                          inputs=full_input1+full_input2, outputs=full_output)
+        # We can choose which components to flag -- in this case, we'll flag all of them
+        full_flag.click(lambda *args: print("full_callback.flag...") or full_callback.flag(args), full_input1 + full_input2 + full_output, None, preprocess=False)

     demo = gr.TabbedInterface([simple_transcribe, full_transcribe], tab_names=["Simple", "Full"])
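A note on the flagging wiring above: `gr.CSVLogger` requires `setup()` to be called on the components before the first call to `flag()`, which is why the new layout calls `setup(...)` right after building each tab. A standalone sketch of that pattern under Gradio 3.x, with illustrative component names:

```python
import gradio as gr

callback = gr.CSVLogger()

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    run = gr.Button("Run")
    flag = gr.Button("Flag")

    # setup() must run before the first flag(); rows are appended under the "flagged" directory
    callback.setup([inp, out], "flagged")

    run.click(lambda s: s.upper(), inp, out)
    # preprocess=False passes the raw component payloads straight to the logger
    flag.click(lambda *args: callback.flag(args), [inp, out], None, preprocess=False)

demo.launch()
```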
requirements-fasterWhisper.txt CHANGED
@@ -1,7 +1,7 @@
 ctranslate2
 faster-whisper
 ffmpeg-python==0.2.0
-gradio==3.
+gradio==3.36.0
 yt-dlp
 json5
 torch
requirements-whisper.txt CHANGED
@@ -2,7 +2,7 @@
 git+https://github.com/openai/whisper.git
 transformers
 ffmpeg-python==0.2.0
-gradio==3.
+gradio==3.36.0
 yt-dlp
 torchaudio
 altair
requirements.txt CHANGED
@@ -1,7 +1,7 @@
 ctranslate2
 faster-whisper
 ffmpeg-python==0.2.0
-gradio==3.
+gradio==3.36.0
 yt-dlp
 json5
 torch