maxin-cn committed
Commit e9a18fc
1 Parent(s): 028c3f1

Upload folder using huggingface_hub

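The commit message indicates the folder was pushed with the huggingface_hub library. As a hedged sketch (the repo id and folder path below are hypothetical placeholders, not taken from this commit), an upload like this is typically produced with upload_folder:

# Illustrative sketch only: "user/space-name" and "." are placeholders, not values from this page.
from huggingface_hub import upload_folder

upload_folder(
    repo_id="user/space-name",          # assumption: the target repo id is not shown here
    repo_type="space",                  # pushing to a Gradio Space repo
    folder_path=".",                    # local folder to upload
    commit_message="Upload folder using huggingface_hub",
)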
datasets/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (164 Bytes)
 
demo.py CHANGED
@@ -270,18 +270,18 @@ with gr.Blocks() as demo:
     input_image_path.submit(fn=update_and_resize_image, inputs=[input_image_path, height, width], outputs=[input_image])
 
     EXAMPLES = [
-        ["./example/aircrafts_flying/0.jpg", "aircrafts flying", "", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
-        ["./example/fireworks/0.jpg", "fireworks", "", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
-        ["./example/flowers_swaying/0.jpg", "flowers swaying", "", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
-        ["./example/girl_walking_on_the_beach/0.jpg", "girl walking on the beach", "", 50, 320, 512, 7.5, True, 0.23, 985, 10, 200],
-        ["./example/house_rotating/0.jpg", "house rotating", "", 50, 320, 512, 7.5, True, 0.23, 985, 10, 100],
-        ["./example/people_runing/0.jpg", "people runing", "", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
+        ["./example/aircrafts_flying/0.jpg", "aircrafts flying", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
+        ["./example/fireworks/0.jpg", "fireworks", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
+        ["./example/flowers_swaying/0.jpg", "flowers swaying", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
+        ["./example/girl_walking_on_the_beach/0.jpg", "girl walking on the beach", 50, 320, 512, 7.5, True, 0.23, 985, 10, 200],
+        ["./example/house_rotating/0.jpg", "house rotating", 50, 320, 512, 7.5, True, 0.23, 985, 10, 100],
+        ["./example/people_runing/0.jpg", "people runing", 50, 320, 512, 7.5, True, 0.23, 975, 10, 100],
     ]
 
     examples = gr.Examples(
         examples = EXAMPLES,
         fn = gen_video,
-        inputs=[input_image, prompt_textbox, negative_prompt_textbox, sample_step_slider, height, width, txt_cfg_scale, use_dctinit, dct_coefficients, noise_level, motion_bucket_id, seed_textbox],
+        inputs=[input_image, prompt_textbox, sample_step_slider, height, width, txt_cfg_scale, use_dctinit, dct_coefficients, noise_level, motion_bucket_id, seed_textbox],
         outputs=[result_video],
         # cache_examples=True,
         cache_examples="lazy",
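The change removes negative_prompt_textbox from the Examples inputs, so each example row also drops its empty negative-prompt slot: gr.Examples expects every row to supply one value per component in inputs, in the same order. A minimal sketch of that contract (placeholder components and callback, assuming only that gradio is installed; this is not the Space's actual demo.py or gen_video pipeline):

# Minimal sketch: preview() stands in for gen_video and just echoes the example values.
import gradio as gr

def preview(image_path, prompt, steps):
    return f"{prompt} ({steps} steps) from {image_path}"

with gr.Blocks() as sketch:
    input_image = gr.Image(type="filepath", label="Input image")
    prompt_textbox = gr.Textbox(label="Prompt")
    sample_step_slider = gr.Slider(10, 100, value=50, label="Sampling steps")
    result = gr.Textbox(label="Result")

    gr.Examples(
        # One value per component in `inputs`, in the same order.
        examples=[["./example/fireworks/0.jpg", "fireworks", 50]],
        fn=preview,
        inputs=[input_image, prompt_textbox, sample_step_slider],
        outputs=[result],
        cache_examples=False,
    )

if __name__ == "__main__":
    sketch.launch()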