jbilcke-hf (HF staff) committed
Commit c6cc468
1 Parent(s): f76ca19

Update app.py

Files changed (1):
  app.py: +11 -24
app.py CHANGED
@@ -6,28 +6,24 @@ import gradio as gr
 import numpy as np
 import PIL.Image
 import torch
-from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
+from diffusers import LCMScheduler, AutoPipelineForText2Image
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
 
+MODEL_ID = "segmind/SSD-1B"
+ADAPTER_ID = "latent-consistency/lcm-lora-ssd-1b"
+
 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 if torch.cuda.is_available():
-    unet = UNet2DConditionModel.from_pretrained(
-        "latent-consistency/lcm-ssd-1b",
-        torch_dtype=torch.float16,
-        variant="fp16"
-    )
-
-    pipe = DiffusionPipeline.from_pretrained(
-        "segmind/SSD-1B",
-        unet=unet,
-        torch_dtype=torch.float16,
-        variant="fp16"
-    )
-
+    pipe = AutoPipelineForText2Image.from_pretrained(MODEL_ID, torch_dtype=torch.float16, variant="fp16")
     pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
-    pipe.to(device)
+    pipe.to("cuda")
+
+    # load and fuse
+    pipe.load_lora_weights(ADAPTER_ID)
+    pipe.fuse_lora()
+
 else:
     pipe = None
 
@@ -43,7 +39,6 @@ def generate(prompt: str,
              seed: int = 0,
              width: int = 1024,
              height: int = 1024,
-             guidance_scale: float = 1.0,
              num_inference_steps: int = 6) -> PIL.Image.Image:
 
     generator = torch.Generator().manual_seed(seed)
@@ -55,7 +50,6 @@ def generate(prompt: str,
                 negative_prompt=negative_prompt,
                 width=width,
                 height=height,
-                guidance_scale=guidance_scale,
                 num_inference_steps=num_inference_steps,
                 generator=generator,
                 output_type='pil').images[0]
@@ -105,12 +99,6 @@ with gr.Blocks() as demo:
                     value=1024,
                 )
             with gr.Row():
-                guidance_scale = gr.Slider(
-                    label='Guidance scale',
-                    minimum=1,
-                    maximum=20,
-                    step=0.1,
-                    value=5.0)
                 num_inference_steps = gr.Slider(
                     label='Number of inference steps',
                     minimum=2,
@@ -133,7 +121,6 @@ with gr.Blocks() as demo:
         seed,
         width,
         height,
-        guidance_scale,
         num_inference_steps,
     ]
     prompt.submit(
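
For reference, here is a minimal standalone sketch of the fast path this commit switches to: SSD-1B with the LCM-LoRA adapter fused into the base weights, sampled in a handful of steps. The model ID, adapter ID, scheduler swap, and fuse call mirror the diff above; the prompt, seed, and guidance_scale value are illustrative assumptions, not part of the commit.

import torch
from diffusers import AutoPipelineForText2Image, LCMScheduler

MODEL_ID = "segmind/SSD-1B"                        # base model, as in the diff
ADAPTER_ID = "latent-consistency/lcm-lora-ssd-1b"  # LCM-LoRA adapter, as in the diff

pipe = AutoPipelineForText2Image.from_pretrained(
    MODEL_ID, torch_dtype=torch.float16, variant="fp16"
)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

# Load the adapter and fuse it into the UNet so sampling carries no LoRA overhead.
pipe.load_lora_weights(ADAPTER_ID)
pipe.fuse_lora()

# LCM-LoRA is distilled for very few steps, and classifier-free guidance is
# effectively disabled at guidance_scale <= 1.0, which is presumably why the
# commit also removes the guidance-scale slider. Prompt and seed are illustrative.
image = pipe(
    "a photo of an astronaut riding a horse",
    num_inference_steps=6,
    guidance_scale=1.0,
    generator=torch.Generator().manual_seed(0),
).images[0]
image.save("out.png")

Fusing trades hot-swappable adapters for slightly faster inference; diffusers also provides pipe.unfuse_lora() to reverse it if another adapter is needed later.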