drewThomasson
committed on
added Model and dataset download button
app.py CHANGED
@@ -41,6 +41,24 @@ def clear_gpu_cache():
         torch.cuda.empty_cache()
 
 XTTS_MODEL = None
+
+def create_zip(folder_path, zip_name):
+    zip_path = os.path.join(tempfile.gettempdir(), f"{zip_name}.zip")
+    shutil.make_archive(zip_path.replace('.zip', ''), 'zip', folder_path)
+    return zip_path
+
+def get_model_zip(out_path):
+    ready_folder = os.path.join(out_path, "ready")
+    if os.path.exists(ready_folder):
+        return create_zip(ready_folder, "optimized_model")
+    return None
+
+def get_dataset_zip(out_path):
+    dataset_folder = os.path.join(out_path, "dataset")
+    if os.path.exists(dataset_folder):
+        return create_zip(dataset_folder, "dataset")
+    return None
+
 def load_model(xtts_checkpoint, xtts_config, xtts_vocab,xtts_speaker):
     global XTTS_MODEL
     clear_gpu_cache()
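For reference on the first hunk: create_zip builds the archive in the system temp directory with shutil.make_archive, which appends ".zip" to the base name itself, hence the .replace('.zip', '') before the call. A minimal standalone sketch of that behaviour, standard library only; the throwaway folder and file below are illustrative placeholders, not paths from app.py:

```python
import os
import shutil
import tempfile

def create_zip(folder_path, zip_name):
    # shutil.make_archive appends ".zip" to base_name itself, so strip it first
    zip_path = os.path.join(tempfile.gettempdir(), f"{zip_name}.zip")
    shutil.make_archive(zip_path.replace('.zip', ''), 'zip', folder_path)
    return zip_path

if __name__ == "__main__":
    demo_dir = tempfile.mkdtemp()  # stand-in for out_path/ready or out_path/dataset
    with open(os.path.join(demo_dir, "example.txt"), "w") as f:
        f.write("placeholder file")
    print(create_zip(demo_dir, "optimized_model"))  # e.g. /tmp/optimized_model.zip
```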
@@ -586,6 +604,12 @@ if __name__ == "__main__":
                         value=False,
                     )
                     tts_btn = gr.Button(value="Step 4 - Inference")
+
+                    model_download_btn = gr.Button("Step 5 - Download Optimized Model ZIP")
+                    dataset_download_btn = gr.Button("Step 5 - Download Dataset ZIP")
+
+                    model_zip_file = gr.File(label="Download Optimized Model", interactive=False)
+                    dataset_zip_file = gr.File(label="Download Dataset", interactive=False)
 
                 with gr.Column() as col3:
                     progress_gen = gr.Label(
@@ -594,6 +618,7 @@ if __name__ == "__main__":
                     tts_output_audio = gr.Audio(label="Generated Audio.")
                     reference_audio = gr.Audio(label="Reference audio used.")
 
+
         prompt_compute_btn.click(
             fn=preprocess_dataset,
             inputs=[
@@ -688,6 +713,18 @@ if __name__ == "__main__":
             outputs=[progress_load,xtts_checkpoint,xtts_config,xtts_vocab,xtts_speaker,speaker_reference_audio],
         )
 
+        model_download_btn.click(
+            fn=get_model_zip,
+            inputs=[out_path],
+            outputs=[model_zip_file]
+        )
+
+        dataset_download_btn.click(
+            fn=get_dataset_zip,
+            inputs=[out_path],
+            outputs=[dataset_zip_file]
+        )
+
     demo.launch(
         share=args.share,
         debug=False,
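The last two hunks wire each button to a handler that returns a file path into a non-interactive gr.File, which Gradio then offers for download (returning None simply leaves the output empty). A stripped-down sketch of that pattern, assuming only what the diff shows: the tiny Blocks app, the Textbox standing in for out_path, and its "finetune_models" default are illustrative placeholders, not the real layout of app.py.

```python
import os
import shutil
import tempfile

import gradio as gr

def create_zip(folder_path, zip_name):
    zip_path = os.path.join(tempfile.gettempdir(), f"{zip_name}.zip")
    shutil.make_archive(zip_path.replace('.zip', ''), 'zip', folder_path)
    return zip_path

def get_model_zip(out_path):
    # Returning None keeps the gr.File output empty instead of raising
    ready_folder = os.path.join(out_path, "ready")
    if os.path.exists(ready_folder):
        return create_zip(ready_folder, "optimized_model")
    return None

with gr.Blocks() as demo:
    out_path = gr.Textbox(label="Output path", value="finetune_models")  # placeholder default
    model_download_btn = gr.Button("Step 5 - Download Optimized Model ZIP")
    model_zip_file = gr.File(label="Download Optimized Model", interactive=False)

    model_download_btn.click(
        fn=get_model_zip,
        inputs=[out_path],
        outputs=[model_zip_file],
    )

demo.launch()
```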