asahi417 commited on
Commit
bf9d629
1 Parent(s): ca0d530

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. README.md +7 -7
  2. app.py +61 -0
  3. requirements.txt +8 -0
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
  title: Stable Video Diffusion
3
- emoji: 📉
4
- colorFrom: pink
5
- colorTo: red
6
  sdk: gradio
7
- sdk_version: 4.39.0
8
  app_file: app.py
9
  pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: Stable Video Diffusion
3
+ emoji: 📺
4
+ colorFrom: green
5
+ colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 4.37.2
8
  app_file: app.py
9
  pinned: false
10
+ license: other
11
+ disable_embedding: true
12
+ ---
app.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import os
from glob import glob
from diffusers.utils import load_image
import spaces
from panna import SVD

# Instantiate the Stable Video Diffusion wrapper once at import time so the
# (expensive) model load happens a single time per Space worker.
model = SVD()
example_files = []
root_url = "https://huggingface.co/spaces/multimodalart/stable-video-diffusion/resolve/main/images"
examples = ["disaster_meme.png", "distracted_meme.png", "hide_meme.png", "success_meme.png", "willy_meme.png", "wink_meme.png"]
# Download the example images next to the app so gr.Examples can reference
# them by bare filename.
for example in examples:
    load_image(f"{root_url}/{example}").save(example)
tmp_output_dir = "outputs"
os.makedirs(tmp_output_dir, exist_ok=True)
# FIX: the original link read "ttps://..." (missing the leading "h"), which
# rendered as a dead link in the demo header.
title = ("# [Stable Video Diffusion](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt)\n"
         "The demo is part of [panna](https://github.com/abacws-abacus/panna) project.")
@spaces.GPU(duration=120)
def infer(init_image, motion_bucket_id, noise_aug_strength, decode_chunk_size, fps, seed):
    """Render a short video from a single input image with SVD.

    The output file is named after the count of MP4s already present in
    ``tmp_output_dir`` (zero-padded to six digits) and the path of the
    rendered video is returned for the gr.Video component to display.
    """
    # Derive the next sequential output name from what is already on disk.
    rendered = glob(os.path.join(tmp_output_dir, "*.mp4"))
    video_path = os.path.join(tmp_output_dir, f"{len(rendered):06d}.mp4")
    frames = model.image2video(
        [init_image],
        motion_bucket_id=motion_bucket_id,
        noise_aug_strength=noise_aug_strength,
        decode_chunk_size=decode_chunk_size,
        fps=fps,
        seed=seed
    )
    # image2video returns one frame sequence per input image; export the first.
    model.export(frames[0], video_path, fps)
    return video_path
35
+
36
+
37
+ with gr.Blocks() as demo:
38
+ gr.Markdown(title)
39
+ with gr.Row():
40
+ with gr.Column():
41
+ image = gr.Image(label="Upload your image", type="pil")
42
+ run_button = gr.Button("Generate", scale=0)
43
+ video = gr.Video()
44
+ with gr.Accordion("Advanced options", open=False):
45
+ seed = gr.Slider(label="Seed", minimum=0, maximum=1_000_000, step=1, value=0)
46
+ motion_bucket_id = gr.Slider(label="Motion bucket id", minimum=1, maximum=255, step=1, value=127)
47
+ noise_aug_strength = gr.Slider(label="Noise Strength", minimum=0, maximum=1, step=0.01, value=0.02)
48
+ fps = gr.Slider(label="Frames per second", minimum=5, maximum=30, step=1, value=6)
49
+ decode_chunk_size = gr.Slider(label="Frames per second", minimum=1, maximum=10, step=1, value=2)
50
+ run_button.click(
51
+ fn=infer,
52
+ inputs=[image, motion_bucket_id, noise_aug_strength, decode_chunk_size, fps, seed],
53
+ outputs=[video]
54
+ )
55
+ gr.Examples(
56
+ fn=infer,
57
+ examples=examples,
58
+ inputs=image,
59
+ outputs=[video]
60
+ )
61
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ https://gradio-builds.s3.amazonaws.com/756e3431d65172df986a7e335dce8136206a293a/gradio-4.7.1-py3-none-any.whl
2
+ git+https://github.com/huggingface/diffusers.git
3
+ transformers
4
+ accelerate
5
+ safetensors
6
+ opencv-python
7
+ # NOTE: "uuid" removed — uuid is part of the Python standard library; the PyPI "uuid" package is an obsolete Python 2 backport that shadows the stdlib module.
8
+ panna>=0.0.1