Commit e76fcaf by linoyts
Parent: d079ab7

Update app.py

Files changed (1)
  1. app.py +69 -7
app.py CHANGED
@@ -51,9 +51,26 @@ def nms(x, t, s):
     z[y > t] = 255
     return z
 
-
-DESCRIPTION = '''# ⚡️Flash⚡️ Scribble SDXL 🖋️🌄
-super fast sketch to image with Flash SDXL, using [@xinsir](https://huggingface.co/xinsir) [scribble sdxl controlnet](https://huggingface.co/xinsir/controlnet-scribble-sdxl-1.0) and [sdxl flash](https://huggingface.co/sd-community/sdxl-flash)
+def HWC3(x):
+    assert x.dtype == np.uint8
+    if x.ndim == 2:
+        x = x[:, :, None]
+    assert x.ndim == 3
+    H, W, C = x.shape
+    assert C == 1 or C == 3 or C == 4
+    if C == 3:
+        return x
+    if C == 1:
+        return np.concatenate([x, x, x], axis=2)
+    if C == 4:
+        color = x[:, :, 0:3].astype(np.float32)
+        alpha = x[:, :, 3:4].astype(np.float32) / 255.0
+        y = color * alpha + 255.0 * (1.0 - alpha)
+        y = y.clip(0, 255).astype(np.uint8)
+        return y
+
+DESCRIPTION = '''# Scribble SDXL 🖋️🌄
+sketch to image with SDXL, using [@xinsir](https://huggingface.co/xinsir) [scribble sdxl controlnet](https://huggingface.co/xinsir/controlnet-scribble-sdxl-1.0)
 '''
 
 if not torch.cuda.is_available():
@@ -124,11 +141,21 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
+# eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler")
+
+
 controlnet = ControlNetModel.from_pretrained(
     "xinsir/controlnet-scribble-sdxl-1.0",
     torch_dtype=torch.float16
 )
 
+controlnet_canny = ControlNetModel.from_pretrained(
+    "xinsir/controlnet-canny-sdxl-1.0",
+    torch_dtype=torch.float16
+)
+
+
+
 # when test with other base model, you need to change the vae also.
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
@@ -140,7 +167,21 @@ pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
     # scheduler=eulera_scheduler,
 )
 pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
 pipe.to(device)
+
+pipe_canny = StableDiffusionXLControlNetPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    controlnet=controlnet_canny,
+    vae=vae,
+    safety_checker=None,
+    torch_dtype=torch.float16,
+    # scheduler=eulera_scheduler,
+)
+
+pipe_canny.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_canny.scheduler.config)
+
+pipe_canny.to(device)
 # Load model.
 
 MAX_SEED = np.iinfo(np.int32).max
@@ -178,6 +219,7 @@ def run(
     controlnet_conditioning_scale: float = 1.0,
     seed: int = 0,
     use_hed: bool = False,
+    use_canny: bool = False,
     progress=gr.Progress(track_tqdm=True),
 ) -> PIL.Image.Image:
     width, height = image['composite'].size
@@ -185,7 +227,13 @@ def run(
     new_width, new_height = int(width * ratio), int(height * ratio)
     image = image['composite'].resize((new_width, new_height))
 
-    if not use_hed:
+    if use_canny:
+        controlnet_img = np.array(image)
+        controlnet_img = cv2.Canny(controlnet_img, 100, 200)
+        controlnet_img = HWC3(controlnet_img)
+        image = Image.fromarray(controlnet_img)
+
+    elif not use_hed:
         controlnet_img = image
     else:
         controlnet_img = processor(image, scribble=False)
@@ -204,7 +252,8 @@ def run(
     prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
 
     generator = torch.Generator(device=device).manual_seed(seed)
-    out = pipe(
+    if use_canny:
+        out = pipe_canny(
         prompt=prompt,
         negative_prompt=negative_prompt,
         image=image,
@@ -215,6 +264,17 @@ def run(
         width=new_width,
         height=new_height,
     ).images[0]
+    else:
+        out = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            image=image,
+            num_inference_steps=num_steps,
+            generator=generator,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            guidance_scale=guidance_scale,
+            width=new_width,
+            height=new_height,).images[0]
 
     return (controlnet_img, out)
 
@@ -234,6 +294,7 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
         prompt = gr.Textbox(label="Prompt")
         style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
         use_hed = gr.Checkbox(label="use HED detector", value=False, info="check this box if you upload an image and want to turn it to a sketch")
+        use_canny = gr.Checkbox(label="use Canny", value=False, info="check this to use ControlNet canny instead of scribble")
         run_button = gr.Button("Run")
         with gr.Accordion("Advanced options", open=False):
             negative_prompt = gr.Textbox(
@@ -243,9 +304,9 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
             num_steps = gr.Slider(
                 label="Number of steps",
                 minimum=1,
-                maximum=20,
+                maximum=50,
                 step=1,
-                value=10,
+                value=1,
             )
            guidance_scale = gr.Slider(
                 label="Guidance scale",
@@ -285,6 +346,7 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
        controlnet_conditioning_scale,
        seed,
        use_hed,
+       use_canny
     ]
     outputs = [image_slider]
     run_button.click(
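
For reference, a minimal sketch of the preprocessing the new use_canny branch performs, isolated from the app. It assumes only numpy, opencv-python, and Pillow; the helper name sketch_to_canny_condition is hypothetical (app.py inlines these steps in run()), and the convert("RGB") call is an extra safety step not in the commit, which passes np.array(image) to cv2.Canny as-is.

import cv2
import numpy as np
from PIL import Image

def sketch_to_canny_condition(img: Image.Image) -> Image.Image:
    # Hypothetical helper mirroring the use_canny branch of run().
    arr = np.array(img.convert("RGB"))  # extra RGB conversion for safety (not in app.py)
    edges = cv2.Canny(arr, 100, 200)    # same thresholds as the commit; uint8 edge map, shape (H, W)
    rgb = np.concatenate([edges[:, :, None]] * 3, axis=2)  # what HWC3 does for 1-channel input
    return Image.fromarray(rgb)

# Usage: condition = sketch_to_canny_condition(Image.open("sketch.png"))
# The resulting edge map is what gets passed as image= to pipe_canny.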