# app.py — Hugging Face Space: Video-To-Audio (VTA-LDM demo)
import gradio as gr
import huggingface_hub
import os
import subprocess
import threading
# Fetch the VTA-LDM checkpoint from the Hugging Face Hub so that
# inference_from_video.py can load it from a local directory.
MODEL_REPO = 'ariesssxu/vta-ldm-clip4clip-v-large'
CHECKPOINT_DIR = './ckpt'
huggingface_hub.snapshot_download(repo_id=MODEL_REPO, local_dir=CHECKPOINT_DIR)
def stream_output(pipe):
    """Forward every line read from *pipe* to stdout as it arrives.

    Intended to run on a worker thread so the child process's stdout and
    stderr can be drained concurrently (prevents pipe-buffer deadlock).

    Parameters
    ----------
    pipe : io.TextIOBase
        A text-mode readable stream (e.g. ``Popen.stdout`` with ``text=True``).
    """
    # iter(readline, '') yields line-by-line until EOF ('' in text mode).
    for line in iter(pipe.readline, ''):
        # flush=True so each line appears immediately in the (block-buffered)
        # Spaces console instead of being held back until the buffer fills.
        print(line, end='', flush=True)
def print_directory_contents(path):
    """Print an indented tree listing of *path* (4 spaces per level).

    Parameters
    ----------
    path : str
        Root directory to walk; each directory name is printed with a
        trailing '/', files one level deeper than their directory.
    """
    for root, dirs, files in os.walk(path):
        # Use relpath rather than root.replace(path, ''): str.replace would
        # substitute *every* occurrence of the path string, computing a wrong
        # depth whenever the root path text reappears inside a subpath.
        rel = os.path.relpath(root, path)
        level = 0 if rel == os.curdir else rel.count(os.sep) + 1
        indent = ' ' * 4 * level
        print(f"{indent}{os.path.basename(root)}/")
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print(f"{subindent}{f}")
def infer(video_in):
    """Gradio handler: run the video-to-audio inference script on one video.

    Parameters
    ----------
    video_in : str
        Filesystem path of the uploaded video (Gradio temp file path).

    Returns
    -------
    str
        The literal string ``"done"`` once the subprocess has exited.
    """
    print(f"VIDEO IN PATH: {video_in}")
    # List-form argv (shell=False) — the user-supplied path is never
    # interpreted by a shell.
    command = ['python', 'inference_from_video.py', '--data_path', video_in]
    # Popen as a context manager guarantees both pipe file objects are
    # closed on exit; the original leaked process.stdout / process.stderr.
    with subprocess.Popen(command, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, text=True,
                          bufsize=1) as process:
        # Drain stdout and stderr on separate threads so neither OS pipe
        # buffer fills up and deadlocks the child process.
        stdout_thread = threading.Thread(target=stream_output,
                                         args=(process.stdout,))
        stderr_thread = threading.Thread(target=stream_output,
                                         args=(process.stderr,))
        stdout_thread.start()
        stderr_thread.start()
        process.wait()
        stdout_thread.join()
        stderr_thread.join()
    print("Inference script finished with return code:", process.returncode)
    # NOTE(review): results directory assumed to default to ./outputs/tmp —
    # confirm against inference_from_video.py.
    print_directory_contents('./outputs/tmp')
    return "done"
# --- Gradio UI: single column with video input, submit button, text output ---
with gr.Blocks() as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Video-To-Audio")
        video_in = gr.Video(label='Video IN')
        submit_btn = gr.Button("Submit")
        #output_sound = gr.Audio(label="Audio OUT")
        output_sound = gr.Textbox(label="Audio OUT")
        submit_btn.click(
            fn = infer,
            inputs = [video_in],
            outputs = [output_sound],
            show_api = False
        )
# Removed stray trailing "|" (extraction artifact) that made this line a
# syntax error in the scraped copy.
demo.launch(show_api=False, show_error=True)