KingNish committed on
Commit
cc50ae5
1 Parent(s): 9e54e91

Create app.py

Files changed (1): app.py (+165, -0)
app.py ADDED
@@ -0,0 +1,165 @@
import gradio as gr
import torch
import os
import spaces
import uuid

from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from PIL import Image

# Constants
bases = {
    "Cartoon": "frankjoshua/toonyou_beta6",
    "Realistic": "emilianJR/epiCRealism",
    "3d": "Lykon/DreamShaper",
    "Anime": "Yntec/mistoonAnime2"
}
step_loaded = None
base_loaded = "Realistic"
motion_loaded = None

# Ensure model and scheduler are initialized in GPU-enabled function
if not torch.cuda.is_available():
    raise NotImplementedError("No GPU detected!")

device = "cuda"
dtype = torch.float16
pipe = AnimateDiffPipeline.from_pretrained(bases[base_loaded], torch_dtype=dtype).to(device)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")

# Safety checkers
from transformers import CLIPFeatureExtractor

feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")

# Generation function: lazily swaps checkpoints only when the user's selection changes.
@spaces.GPU(enable_queue=True)
def generate_image(prompt, base, motion, step, progress=gr.Progress()):
    global step_loaded
    global base_loaded
    global motion_loaded
    print(prompt, base, step)

    # Load the AnimateDiff-Lightning distilled weights for the requested step count.
    if step_loaded != step:
        repo = "ByteDance/AnimateDiff-Lightning"
        ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
        step_loaded = step

    # Swap in the UNet of the selected base (style) model.
    if base_loaded != base:
        pipe.unet.load_state_dict(torch.load(hf_hub_download(bases[base], "unet/diffusion_pytorch_model.bin"), map_location=device), strict=False)
        base_loaded = base

    # Apply the selected camera-motion LoRA, if any.
    if motion_loaded != motion:
        pipe.unload_lora_weights()
        if motion != "":
            pipe.load_lora_weights(motion, adapter_name="motion")
            pipe.set_adapters(["motion"], [0.7])
        motion_loaded = motion

    progress((0, step))
    def progress_callback(i, t, z):
        progress((i+1, step))

    output = pipe(prompt=prompt, guidance_scale=1.2, num_inference_steps=step, callback=progress_callback, callback_steps=1)

    name = str(uuid.uuid4()).replace("-", "")
    path = f"/tmp/{name}.mp4"
    export_to_video(output.frames[0], path, fps=10)
    return path


# Gradio Interface
with gr.Blocks(css="style.css") as demo:
    gr.HTML(
        "<h1><center>Instant⚡Video</center></h1>" +
        "<p><center>Lightning-fast text-to-video generation</center></p>" +
+ "<p><center><span style='color: red;'>You may change the steps from 4 to 8, if you didn't get satisfied results.</center></p>" +
81
+ "<p><center><strong> First Image processing takes time then images generate faster.</p>" +
82
+ "<p><center>Write prompts in style as Given in Example</p>"
    )
    with gr.Group():
        with gr.Row():
            prompt = gr.Textbox(
                label='Prompt'
            )
        with gr.Row():
            select_base = gr.Dropdown(
                label='Base model',
                choices=[
                    "Cartoon",
                    "Realistic",
                    "3d",
                    "Anime",
                ],
                value=base_loaded,
                interactive=True
            )
            select_motion = gr.Dropdown(
                label='Motion',
                choices=[
                    ("Default", ""),
                    ("Zoom in", "guoyww/animatediff-motion-lora-zoom-in"),
                    ("Zoom out", "guoyww/animatediff-motion-lora-zoom-out"),
                    ("Tilt up", "guoyww/animatediff-motion-lora-tilt-up"),
                    ("Tilt down", "guoyww/animatediff-motion-lora-tilt-down"),
                    ("Pan left", "guoyww/animatediff-motion-lora-pan-left"),
                    ("Pan right", "guoyww/animatediff-motion-lora-pan-right"),
                    ("Roll left", "guoyww/animatediff-motion-lora-rolling-anticlockwise"),
                    ("Roll right", "guoyww/animatediff-motion-lora-rolling-clockwise"),
                ],
                value="guoyww/animatediff-motion-lora-zoom-in",
                interactive=True
            )
            select_step = gr.Dropdown(
                label='Inference steps',
                choices=[
                    ('1-Step', 1),
                    ('2-Step', 2),
                    ('4-Step', 4),
                    ('8-Step', 8),
                ],
                value=4,
                interactive=True
            )
            submit = gr.Button(
                scale=1,
                variant='primary'
            )
    video = gr.Video(
        label='AnimateDiff-Lightning',
        autoplay=True,
        height=512,
        width=512,
        elem_id="video_output"
    )

    prompt.submit(
        fn=generate_image,
        inputs=[prompt, select_base, select_motion, select_step],
        outputs=video,
    )
    submit.click(
        fn=generate_image,
        inputs=[prompt, select_base, select_motion, select_step],
        outputs=video,
    )

    gr.Examples(
        examples=[
            ["Focus: Eiffel Tower (Animate: Clouds moving)"],
            ["Focus: Lion in forest (Animate: Lion as walking)"],
            ["Focus: Astronaut in Space"],
            ["Focus: Group of Birds in sky (Animate: Birds Moving) (Shot From distance)"],
            ["Focus: Statue of liberty (Shot from Drone) (Animate: Drone coming toward statue)"],
        ],
        fn=generate_image,
        inputs=[prompt, select_base, select_motion, select_step],
        outputs=video,
        cache_examples=False,
    )

demo.queue().launch()
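
For reference, a minimal sketch of driving the app programmatically with gradio_client once it is running. It assumes the server is reachable on Gradio's default local port and that the auto-generated endpoint name follows the function name ("/generate_image"); neither is set explicitly in app.py, so both are assumptions.

# Usage sketch (not part of app.py): call the running demo over Gradio's API.
from gradio_client import Client

# Connect to the locally running demo (assumed address and port).
client = Client("http://127.0.0.1:7860")

# Positional inputs mirror generate_image(prompt, base, motion, step).
video_path = client.predict(
    "Focus: Eiffel Tower (Animate: Clouds moving)",   # prompt
    "Realistic",                                      # base model
    "guoyww/animatediff-motion-lora-zoom-in",         # motion LoRA ("" = none)
    4,                                                # inference steps
    api_name="/generate_image",                       # assumed auto-generated endpoint name
)
print(video_path)  # local path to the generated .mp4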