Update gradio_app.py
gradio_app.py CHANGED (+6, -18)
@@ -260,18 +260,6 @@ def get_image(model, prompts, image1, image2, noise_shape, n_samples=1, ddim_ste
 
 
 
-# i2v_examples_interp_1024 = [
-#     ['prompts/1024_interp/frame_000000.jpg', 'prompts/1024_interp/frame_000041.jpg', 'a cat is eating', 50, 7.5, 1.0, 10, 123]
-# ]
-
-i2v_examples_interp_1024 = [
-    ['prompts/1024_interp/hall_first.jpg', 'prompts/1024_interp/hall_sketch.jpg',
-     'At the start, a still image of a wooden hallway with arched arches, doors, and various furniture. The scene then transitions to an animated version of the hallway, showcasing more details like a bookshelf and a window.',
-     50, 7.5, 1.0, 10, 123]
-]
-
-
-
 
 def dynamicrafter_demo(result_dir='./tmp/', res=1024):
     if res == 1024:
@@ -319,12 +307,12 @@ def dynamicrafter_demo(result_dir='./tmp/', res=1024):
             # unconditional_guidance_scale=1.0, cfg_img=None, fs=None, text_input=False, multiple_cond_cfg=False, \
             # loop=False, interp=False, timestep_spacing='uniform', guidance_rescale=0.0, **kwargs):
 
-            gr.Examples(examples=i2v_examples_interp_1024,
-                        inputs=[i2v_input_image, i2v_input_text, i2v_input_image, i2v_input_image2, [72, 108], 1, i2v_steps, i2v_eta, 1.0, None, i2v_motion, i2v_seed],
-                        outputs=[i2v_output_video],
-                        fn = get_image,
-                        cache_examples=False,
-            )
+            # gr.Examples(examples=i2v_examples_interp_1024,
+            #             inputs=[i2v_input_image, i2v_input_text, i2v_input_image, i2v_input_image2, [72, 108], 1, i2v_steps, i2v_eta, 1.0, None, i2v_motion, i2v_seed],
+            #             outputs=[i2v_output_video],
+            #             fn = get_image,
+            #             cache_examples=False,
+            # )
         i2v_end_btn.click(inputs=[i2v_input_image, i2v_input_text, i2v_input_image, i2v_input_image2, [72, 108], 1, i2v_steps, i2v_eta, 1.0, None, i2v_motion, i2v_seed],
                           outputs=[i2v_output_video],
                           fn = get_image
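
For reference, below is a minimal, self-contained sketch of the pattern the commented-out block follows: a gr.Examples table and the run button wired to one shared handler. The component names (i2v_input_image, i2v_input_image2, i2v_input_text, i2v_steps, i2v_eta, i2v_motion, i2v_seed, i2v_end_btn, i2v_output_video) and the example image paths come from the diff above; the layout, slider ranges, labels, and the stub get_image() are assumptions for illustration, not the repository's implementation (which also passes extra values such as [72, 108] and a cfg scale that this sketch omits).

# Minimal sketch (not the repository's code): an Examples table and the run
# button share one handler, so selecting an example row pre-fills the inputs
# and the button triggers the actual generation. The real get_image() in
# gradio_app.py runs the interpolation model; here it is a stub so the
# snippet stays self-contained.
import gradio as gr

def get_image(image1, prompt, image2, steps, eta, motion, seed):
    # Placeholder handler: the real function returns the generated video path.
    return None

with gr.Blocks() as demo:
    with gr.Column():
        i2v_input_image = gr.Image(label="Start frame")
        i2v_input_image2 = gr.Image(label="End frame")
        i2v_input_text = gr.Textbox(label="Prompt")
        i2v_steps = gr.Slider(1, 60, value=50, step=1, label="Sampling steps")
        i2v_eta = gr.Slider(0.0, 1.0, value=1.0, step=0.1, label="ETA")
        i2v_motion = gr.Slider(5, 20, value=10, step=1, label="Motion magnitude")
        i2v_seed = gr.Slider(0, 10000, value=123, step=1, label="Random seed")
        i2v_end_btn = gr.Button("Generate")
        i2v_output_video = gr.Video(label="Generated video")

    # One example row per list entry; each row needs one value per input
    # component. With cache_examples=False the outputs are not pre-computed,
    # so the table only pre-fills the form. Paths and prompt are taken from
    # the diff (prompt abbreviated) and are illustrative here.
    gr.Examples(
        examples=[[
            'prompts/1024_interp/hall_first.jpg',
            'At the start, a still image of a wooden hallway with arched arches, doors, and various furniture.',
            'prompts/1024_interp/hall_sketch.jpg',
            50, 1.0, 10, 123,
        ]],
        inputs=[i2v_input_image, i2v_input_text, i2v_input_image2,
                i2v_steps, i2v_eta, i2v_motion, i2v_seed],
        outputs=[i2v_output_video],
        fn=get_image,
        cache_examples=False,
    )

    # The button runs the same handler with the same input ordering.
    i2v_end_btn.click(fn=get_image,
                      inputs=[i2v_input_image, i2v_input_text, i2v_input_image2,
                              i2v_steps, i2v_eta, i2v_motion, i2v_seed],
                      outputs=[i2v_output_video])

if __name__ == "__main__":
    demo.launch()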