orderlymirror committed on
Commit
cc0e894
1 Parent(s): 5492f74

Upload 4 files

Browse files
Files changed (4) hide show
  1. README.md +7 -6
  2. app.py +174 -0
  3. requirements.txt +8 -0
  4. style.css +7 -0
README.md CHANGED
@@ -1,12 +1,13 @@
1
  ---
2
- title: Text To Video
3
- emoji: 🚀
4
- colorFrom: green
5
- colorTo: yellow
6
  sdk: gradio
7
- sdk_version: 5.5.0
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Instant Video
3
+ emoji:
4
+ colorFrom: blue
5
+ colorTo: purple
6
  sdk: gradio
7
+ sdk_version: 4.26.0
8
  app_file: app.py
9
  pinned: false
10
+ short_description: Fast Text 2 Video Generator
11
  ---
12
 
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
import os
import spaces
import uuid

from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from PIL import Image

# Markdown blurb advertising a sibling Space (presumably intended for the UI;
# not referenced by visible code — TODO confirm it is used elsewhere).
MORE = """ ## TRY Other Demos
### Instant Image: 4k images in 5 Second -> https://huggingface.co/spaces/KingNish/Instant-Image
"""

# Constants
# Display name (as shown in the UI dropdown) -> Hugging Face repo id of the
# base Stable Diffusion checkpoint used by the AnimateDiff pipeline.
bases = {
    "Cartoon": "frankjoshua/toonyou_beta6",
    "Realistic": "emilianJR/epiCRealism",
    "3d": "Lykon/DreamShaper",
    "Anime": "Yntec/mistoonAnime2"
}
# Cache of what is currently loaded into the single shared `pipe`, so that
# consecutive requests with the same settings skip the slow weight reloads.
step_loaded = None          # step count of the loaded Lightning checkpoint (None = not loaded)
base_loaded = "Realistic"   # key into `bases` for the checkpoint currently in pipe.unet
motion_loaded = None        # motion-LoRA repo id currently applied ("" = none)

# Ensure model and scheduler are initialized in GPU-enabled function
if not torch.cuda.is_available():
    raise NotImplementedError("No GPU detected!")

device = "cuda"
dtype = torch.float16
# One pipeline instance shared by all requests; weights are hot-swapped in
# generate_image() rather than re-creating the pipeline per call.
pipe = AnimateDiffPipeline.from_pretrained(bases[base_loaded], torch_dtype=dtype).to(device)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")

# Safety checkers
from transformers import CLIPFeatureExtractor

feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
43
# Function
@spaces.GPU(duration=30,queue=False)
def generate_image(prompt, base="Realistic", motion="", step=8, progress=gr.Progress()):
    """Generate a short video for ``prompt`` and return the path to an MP4.

    Hot-swaps weights on the shared module-level ``pipe`` as needed, using the
    ``step_loaded`` / ``base_loaded`` / ``motion_loaded`` caches to avoid
    redundant (slow) downloads and state-dict loads.

    Parameters:
        prompt: text description of the scene to animate.
        base: key into ``bases`` selecting the base checkpoint.
        motion: motion-LoRA repo id, or "" for no camera-motion LoRA.
        step: inference step count; selects the matching
            AnimateDiff-Lightning distilled checkpoint (1/2/4/8).
        progress: Gradio progress tracker, updated once per denoising step.

    Returns:
        Filesystem path of the exported MP4 (under /tmp).
    """
    global step_loaded
    global base_loaded
    global motion_loaded
    print(prompt, base, step)

    # Swap the base checkpoint FIRST: loading its UNet weights would otherwise
    # overwrite the Lightning distilled weights loaded in the step branch.
    # (The original order — step before base — left stale Lightning weights
    # clobbered whenever the base changed but the step did not.)
    if base_loaded != base:
        pipe.unet.load_state_dict(torch.load(hf_hub_download(bases[base], "unet/diffusion_pytorch_model.bin"), map_location=device), strict=False)
        base_loaded = base
        # The base swap replaced the UNet weights, so the Lightning checkpoint
        # must be re-applied even if `step` is unchanged.
        step_loaded = None

    if step_loaded != step:
        repo = "ByteDance/AnimateDiff-Lightning"
        ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
        step_loaded = step

    if motion_loaded != motion:
        # Always drop the previous LoRA before (optionally) applying a new one.
        pipe.unload_lora_weights()
        if motion != "":
            pipe.load_lora_weights(motion, adapter_name="motion")
            pipe.set_adapters(["motion"], [0.7])
        motion_loaded = motion

    progress((0, step))
    def progress_callback(i, t, z):
        # Diffusers invokes this with (step_index, timestep, latents).
        progress((i+1, step))

    output = pipe(prompt=prompt, guidance_scale=1.2, num_inference_steps=step, callback=progress_callback, callback_steps=1)

    # Export frames to a uniquely named MP4 in /tmp and hand the path to Gradio.
    name = str(uuid.uuid4()).replace("-", "")
    path = f"/tmp/{name}.mp4"
    export_to_video(output.frames[0], path, fps=10)
    return path
+
78
+
79
+ # Gradio Interface
80
+ with gr.Blocks(css="style.css") as demo:
81
+ gr.HTML(
82
+ "<h1><center>Instant⚡Video</center></h1>" +
83
+ "<p><center><span style='color: red;'>You may change the steps from 4 to 8, if you didn't get satisfied results.</center></p>" +
84
+ "<p><center><strong>First Video Generating takes time then Videos generate faster.</p>" +
85
+ "<p><center>To get best results Make Sure to Write prompts in style as Given in Examples/p>" +
86
+ "<p><a href='https://huggingface.co/spaces/KingNish/Instant-Video/discussions/1' >Must Share you Best Results with Community - Click HERE<a></p>"
87
+ )
88
+ with gr.Group():
89
+ with gr.Row():
90
+ prompt = gr.Textbox(
91
+ label='Prompt'
92
+ )
93
+ with gr.Row():
94
+ select_base = gr.Dropdown(
95
+ label='Base model',
96
+ choices=[
97
+ "Cartoon",
98
+ "Realistic",
99
+ "3d",
100
+ "Anime",
101
+ ],
102
+ value=base_loaded,
103
+ interactive=True
104
+ )
105
+ select_motion = gr.Dropdown(
106
+ label='Motion',
107
+ choices=[
108
+ ("Default", ""),
109
+ ("Zoom in", "guoyww/animatediff-motion-lora-zoom-in"),
110
+ ("Zoom out", "guoyww/animatediff-motion-lora-zoom-out"),
111
+ ("Tilt up", "guoyww/animatediff-motion-lora-tilt-up"),
112
+ ("Tilt down", "guoyww/animatediff-motion-lora-tilt-down"),
113
+ ("Pan left", "guoyww/animatediff-motion-lora-pan-left"),
114
+ ("Pan right", "guoyww/animatediff-motion-lora-pan-right"),
115
+ ("Roll left", "guoyww/animatediff-motion-lora-rolling-anticlockwise"),
116
+ ("Roll right", "guoyww/animatediff-motion-lora-rolling-clockwise"),
117
+ ],
118
+ value="guoyww/animatediff-motion-lora-zoom-in",
119
+ interactive=True
120
+ )
121
+ select_step = gr.Dropdown(
122
+ label='Inference steps',
123
+ choices=[
124
+ ('1-Step', 1),
125
+ ('2-Step', 2),
126
+ ('4-Step', 4),
127
+ ('8-Step', 8),
128
+ ],
129
+ value=4,
130
+ interactive=True
131
+ )
132
+ submit = gr.Button(
133
+ scale=1,
134
+ variant='primary'
135
+ )
136
+ video = gr.Video(
137
+ label='AnimateDiff-Lightning',
138
+ autoplay=True,
139
+ height=512,
140
+ width=512,
141
+ elem_id="video_output"
142
+ )
143
+
144
+ gr.on(triggers=[
145
+ submit.click,
146
+ prompt.submit
147
+ ],
148
+ fn = generate_image,
149
+ inputs = [prompt, select_base, select_motion, select_step],
150
+ outputs = [video],
151
+ api_name = "instant_video",
152
+ queue = False
153
+ )
154
+
155
+ gr.Examples(
156
+ examples=[
157
+ ["Focus: Eiffel Tower (Animate: Clouds moving)"], #Atmosphere Movement Example
158
+ ["Focus: Trees In forest (Animate: Lion running)"], #Object Movement Example
159
+ ["Focus: Astronaut in Space"], #Normal
160
+ ["Focus: Group of Birds in sky (Animate: Birds Moving) (Shot From distance)"], #Camera distance
161
+ ["Focus: Statue of liberty (Shot from Drone) (Animate: Drone coming toward statue)"], #Camera Movement
162
+ ["Focus: Panda in Forest (Animate: Drinking Tea)"], #Doing Something
163
+ ["Focus: Kids Playing (Season: Winter)"], #Atmosphere or Season
164
+ {"Focus: Cars in Street (Season: Rain, Daytime) (Shot from Distance) (Movement: Cars running)"} #Mixture
165
+ ],
166
+ fn=generate_image,
167
+ inputs=[prompt],
168
+ outputs=[video],
169
+ cache_examples="lazy",
170
+ )
171
+
172
+ demo.queue().launch()
173
+
174
+ Translate
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ accelerate
2
+ diffusers
3
+ gradio
4
+ torch
5
+ transformers
6
+ opencv-python
7
+ peft
8
+ spaces
style.css ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ .gradio-container {
2
+ max-width: 800px !important;
3
+ }
4
+
5
+ #video_output {
6
+ margin: 0 auto
7
+ }