# app.py: a Gradio front end for HunyuanVideo's sample_video.py

import gradio as gr
import subprocess
import os
# Path to save the generated video
output_dir = './results'
# Ensure the output directory exists
os.makedirs(output_dir, exist_ok=True)

# Generate a video by invoking the HunyuanVideo sample_video.py script.
def generate_video(prompt, video_size, video_length, infer_steps, seed, save_path):
    # Build the command line for sample_video.py. The "Video Size" textbox holds a
    # single string such as "720 1280", so it is split into separate arguments here.
    command = [
        "python3", "sample_video.py",
        "--prompt", prompt,
        "--video-size", *video_size.split(),
        "--video-length", str(int(video_length)),
        "--infer-steps", str(int(infer_steps)),
        "--seed", str(int(seed)),
        "--save-path", save_path,
    ]
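    # With the default UI values, the assembled command looks roughly like:
    #   python3 sample_video.py --prompt "<prompt>" --video-size 720 1280 \
    #       --video-length 129 --infer-steps 30 --seed 0 --save-path ./results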
    # Run the video generation process
    try:
        subprocess.run(command, check=True)
        # Return the path to the generated video file. The filename below is the one
        # this app expects; adjust it if sample_video.py writes a different name.
        generated_video_path = os.path.join(save_path, "generated_video.mp4")
        return generated_video_path
    except subprocess.CalledProcessError as e:
        # Raise a Gradio error so the failure is shown in the UI; returning a plain
        # string would not render in the Video output component.
        raise gr.Error(f"Error generating video: {e}")


# Create the Gradio interface
def create_interface():
    # Define input components
    prompt_input = gr.Textbox(label="Prompt", placeholder="Enter the prompt for the video.")
    video_size_input = gr.Textbox(label="Video Size", value="720 1280")
    video_length_input = gr.Slider(label="Video Length", minimum=1, maximum=200, value=129, step=1)
    infer_steps_input = gr.Slider(label="Inference Steps", minimum=1, maximum=100, value=30, step=1)
    seed_input = gr.Slider(label="Seed", minimum=0, maximum=1000, value=0, step=1)
    save_path_input = gr.Textbox(label="Save Path", value=output_dir)

    # Define the output component
    output_video = gr.Video(label="Generated Video")

    # Assemble the interface. Generation runs when the user clicks Submit;
    # live=True would rerun the slow subprocess on every input change.
    interface = gr.Interface(
        fn=generate_video,
        inputs=[prompt_input, video_size_input, video_length_input, infer_steps_input, seed_input, save_path_input],
        outputs=[output_video],
    )

    # Launch the interface
    interface.launch()


if __name__ == "__main__":
    create_interface()
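
# Usage note (assumed environment): this app is meant to be launched from a checkout of
# the HunyuanVideo repository, with sample_video.py and its model weights available in
# the working directory, e.g.:
#   python3 app.py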