KingNish committed on
Commit 2c31dbf
1 Parent(s): fab1585

Update app.py

Files changed (1):
  app.py  +45 -34
app.py CHANGED

@@ -10,10 +10,6 @@ from huggingface_hub import hf_hub_download
 from safetensors.torch import load_file
 from PIL import Image
 
-MORE = """ ## TRY Other Demos
-### Instant Image: 4k images in 5 Second -> https://huggingface.co/spaces/KingNish/Instant-Image
-"""
-
 # Constants
 bases = {
     "Cartoon": "frankjoshua/toonyou_beta6",
@@ -21,49 +17,63 @@ bases = {
     "3d": "Lykon/DreamShaper",
     "Anime": "Yntec/mistoonAnime2"
 }
-step_loaded = None
-base_loaded = "Realistic"
-motion_loaded = None
 
-# Ensure model and scheduler are initialized in GPU-enabled function
+motion_models = {
+    "Default": None,
+    "Zoom in": "guoyww/animatediff-motion-lora-zoom-in",
+    "Zoom out": "guoyww/animatediff-motion-lora-zoom-out",
+    "Tilt up": "guoyww/animatediff-motion-lora-tilt-up",
+    "Tilt down": "guoyww/animatediff-motion-lora-tilt-down",
+    "Pan left": "guoyww/animatediff-motion-lora-pan-left",
+    "Pan right": "guoyww/animatediff-motion-lora-pan-right",
+    "Roll left": "guoyww/animatediff-motion-lora-rolling-anticlockwise",
+    "Roll right": "guoyww/animatediff-motion-lora-rolling-clockwise",
+}
+
+
+# Preload models
 if not torch.cuda.is_available():
     raise NotImplementedError("No GPU detected!")
 
 device = "cuda"
 dtype = torch.float16
-pipe = AnimateDiffPipeline.from_pretrained(bases[base_loaded], torch_dtype=dtype).to(device)
-pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
 
-# Safety checkers
-from transformers import CLIPFeatureExtractor
+pipes = {}
+for base_name, base_path in bases.items():
+    pipe = AnimateDiffPipeline.from_pretrained(base_path, torch_dtype=dtype).to(device)
+    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
+    pipes[base_name] = pipe
+
+# Load motion models
+for motion_name, motion_path in motion_models.items():
+    if motion_path:
+        motion_model = MotionAdapter.from_pretrained(motion_path, torch_dtype=dtype).to(device)
+        motion_models[motion_name] = motion_model
 
-feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
 
 # Function
 @spaces.GPU(duration=60,queue=False)
-def generate_image(prompt, base="Realistic", motion="", step=8, progress=gr.Progress()):
-    global step_loaded
-    global base_loaded
-    global motion_loaded
-    print(prompt, base, step)
-
-    if step_loaded != step:
-        repo = "ByteDance/AnimateDiff-Lightning"
-        ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
-        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
-        step_loaded = step
+def generate_image(prompt, base="Realistic", motion="Default", step=8, progress=gr.Progress()):
+    global pipes
+    global motion_models
+
+    pipe = pipes[base]
 
-    if base_loaded != base:
-        pipe.unet.load_state_dict(torch.load(hf_hub_download(bases[base], "unet/diffusion_pytorch_model.bin"), map_location=device), strict=False)
-        base_loaded = base
+    if motion != "Default":
+        pipe.motion_adapter = motion_models[motion]
+    else:
+        pipe.motion_adapter = None
 
-    if motion_loaded != motion:
-        pipe.unload_lora_weights()
-        if motion != "":
-            pipe.load_lora_weights(motion, adapter_name="motion")
-            pipe.set_adapters(["motion"], [0.7])
-        motion_loaded = motion
 
+    # Load step model if not already loaded
+    repo = "ByteDance/AnimateDiff-Lightning"
+    ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
+    try:
+        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt, local_files_only=True), device=device), strict=False)
+    except:
+        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
+
+    # Generate image
     output = pipe(prompt=f"{base} image of {prompt}", guidance_scale=1.2, num_inference_steps=step)
 
     name = str(uuid.uuid4()).replace("-", "")
@@ -72,6 +82,7 @@ def generate_image(prompt, base="Realistic", motion="", step=8, progress=gr.Progress()):
     return path
 
 
+
 # Gradio Interface
 with gr.Blocks(css="style.css") as demo:
     gr.HTML(
@@ -162,7 +173,7 @@ with gr.Blocks(css="style.css") as demo:
         fn=generate_image,
        inputs=[prompt],
        outputs=[video],
-        cache_examples=True,
+        cache_examples="lazy",
     )
 
 demo.queue().launch()
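
For quick verification, a minimal smoke-test sketch of the reworked generate_image signature (illustrative only, not part of the commit): it assumes the lines are pasted into app.py above demo.queue().launch() on a CUDA machine, after the module-level preloading loops have run, and that base and motion are keys of the new bases and motion_models dicts.

# Hypothetical smoke test for the updated generate_image (not in the commit).
# Runs in the same module, after the pipes/motion_models preloading, before launch().
sample_path = generate_image(
    "a corgi running on the beach",  # prompt
    base="Anime",                    # any key of the bases dict
    motion="Zoom in",                # any key of the new motion_models dict
    step=8,                          # AnimateDiff-Lightning step checkpoint to load
)
print("sample video written to", sample_path)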