# NOTE(review): removed HuggingFace Spaces page-scrape residue that preceded
# this line ("Runtime error" banner, file size, commit hashes, line-number
# gutter) — it was not Python and made the module unimportable.
import gradio as gr
from model import Model
import os
def create_demo(model: Model):
    """Build and return the 'Video Instruct Pix2Pix' Gradio UI.

    Args:
        model: project ``Model`` instance; its ``process_pix2pix`` method is
            invoked with the collected inputs when the user clicks Run.

    Returns:
        gradio.Blocks: the assembled (not yet launched) demo.
    """
    # (video path, edit prompt) pairs offered as one-click examples.
    examples = [
        ['__assets__/pix2pix video/camel.mp4', 'make it Van Gogh Starry Night style'],
        ['__assets__/pix2pix video/mini-cooper.mp4', 'make it Picasso style'],
        ['__assets__/pix2pix video/snowboard.mp4', 'replace man with robot'],
        ['__assets__/pix2pix video/white-swan.mp4', 'replace swan with mallard'],
    ]
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Video Instruct Pix2Pix')
            gr.Markdown('#### Description: Our current preview release supports arbitrary length videos, and for performance purposes the video is scaled down before processing. For faster inference you can choose lower output frames per seconds from Advanced Options.')
        with gr.Row():
            with gr.Column():
                # FIX: the original passed source='upload', type='numpy' and
                # chained .style(height="auto"); all three were removed in
                # Gradio 4 and break at build time. 'upload' and auto height
                # are the defaults, so dropping them preserves behavior on
                # Gradio 3 while restoring compatibility with Gradio 4.
                input_image = gr.Video(label="Input Video",
                                       format="mp4",
                                       visible=True)
            with gr.Column():
                prompt = gr.Textbox(label='Prompt')
                # FIX: Button takes its caption as the positional `value`
                # argument; `label=` is not a Button parameter.
                run_button = gr.Button('Run')
                with gr.Accordion('Advanced options', open=False):
                    image_resolution = gr.Slider(label='Image Resolution',
                                                 minimum=256,
                                                 maximum=1024,
                                                 value=512,
                                                 step=64)
                    seed = gr.Slider(label='Seed',
                                     minimum=0,
                                     maximum=65536,
                                     value=0,
                                     step=1)
                    start_t = gr.Slider(label='Starting time in seconds',
                                        minimum=0,
                                        maximum=10,
                                        value=0,
                                        step=1)
                    end_t = gr.Slider(label='End time in seconds (-1 corresponds to uploaded video duration)',
                                      minimum=0,
                                      maximum=10,
                                      value=-1,
                                      step=1)
                    out_fps = gr.Slider(label='Output video fps (-1 corresponds to uploaded video fps)',
                                        minimum=1,
                                        maximum=30,
                                        value=-1,
                                        step=1)
            with gr.Column():
                result = gr.Video(label='Output',
                                  show_label=True)
        # Order must match the parameter order of model.process_pix2pix.
        inputs = [
            input_image,
            prompt,
            image_resolution,
            seed,
            start_t,
            end_t,
            out_fps,
        ]
        gr.Examples(examples=examples,
                    inputs=inputs,
                    outputs=result,
                    # cache_examples=os.getenv('SYSTEM') == 'spaces',
                    run_on_click=False,
                    )
        run_button.click(fn=model.process_pix2pix,
                         inputs=inputs,
                         outputs=result)
    return demo
# NOTE(review): trailing scrape artifact ('|') removed.