Wauplin (HF staff) committed on
Commit af67792
1 Parent(s): 8669de2

Delete clip_guided_stable_diffusion_img2img.py

clip_guided_stable_diffusion_img2img.py DELETED
@@ -1,490 +0,0 @@
- import inspect
- from typing import List, Optional, Union
-
- import numpy as np
- import PIL.Image
- import torch
- from torch import nn
- from torch.nn import functional as F
- from torchvision import transforms
- from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
-
- from diffusers import (
-     AutoencoderKL,
-     DDIMScheduler,
-     DPMSolverMultistepScheduler,
-     LMSDiscreteScheduler,
-     PNDMScheduler,
-     UNet2DConditionModel,
- )
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
- from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
- from diffusers.utils import PIL_INTERPOLATION, deprecate
- from diffusers.utils.torch_utils import randn_tensor
-
-
- EXAMPLE_DOC_STRING = """
-     Examples:
-         ```py
-         from io import BytesIO
-
-         import requests
-         import torch
-         from diffusers import DiffusionPipeline
-         from PIL import Image
-         from transformers import CLIPFeatureExtractor, CLIPModel
-
-         feature_extractor = CLIPFeatureExtractor.from_pretrained(
-             "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
-         )
-         clip_model = CLIPModel.from_pretrained(
-             "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
-         )
-
-
-         guided_pipeline = DiffusionPipeline.from_pretrained(
-             "CompVis/stable-diffusion-v1-4",
-             # load the community img2img CLIP-guidance pipeline that ships with diffusers
-             custom_pipeline="clip_guided_stable_diffusion_img2img",
-             clip_model=clip_model,
-             feature_extractor=feature_extractor,
-             torch_dtype=torch.float16,
-         )
-         guided_pipeline.enable_attention_slicing()
-         guided_pipeline = guided_pipeline.to("cuda")
-
-         prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"
-
-         url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
-
-         response = requests.get(url)
-         init_image = Image.open(BytesIO(response.content)).convert("RGB")
-
-         image = guided_pipeline(
-             prompt=prompt,
-             num_inference_steps=30,
-             image=init_image,
-             strength=0.75,
-             guidance_scale=7.5,
-             clip_guidance_scale=100,
-             num_cutouts=4,
-             use_cutouts=False,
-         ).images[0]
-         display(image)
-         ```
- """
-
-
- def preprocess(image, w, h):
-     if isinstance(image, torch.Tensor):
-         return image
-     elif isinstance(image, PIL.Image.Image):
-         image = [image]
-
-     if isinstance(image[0], PIL.Image.Image):
-         image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
-         image = np.concatenate(image, axis=0)
-         image = np.array(image).astype(np.float32) / 255.0
-         image = image.transpose(0, 3, 1, 2)
-         image = 2.0 * image - 1.0
-         image = torch.from_numpy(image)
-     elif isinstance(image[0], torch.Tensor):
-         image = torch.cat(image, dim=0)
-     return image
-
-
- class MakeCutouts(nn.Module):
-     def __init__(self, cut_size, cut_power=1.0):
-         super().__init__()
-
-         self.cut_size = cut_size
-         self.cut_power = cut_power
-
-     def forward(self, pixel_values, num_cutouts):
-         sideY, sideX = pixel_values.shape[2:4]
-         max_size = min(sideX, sideY)
-         min_size = min(sideX, sideY, self.cut_size)
-         cutouts = []
-         for _ in range(num_cutouts):
-             size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
-             offsetx = torch.randint(0, sideX - size + 1, ())
-             offsety = torch.randint(0, sideY - size + 1, ())
-             cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
-             cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
-         return torch.cat(cutouts)
-
-
- def spherical_dist_loss(x, y):
-     x = F.normalize(x, dim=-1)
-     y = F.normalize(y, dim=-1)
-     return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
-
-
- def set_requires_grad(model, value):
-     for param in model.parameters():
-         param.requires_grad = value
-
-
- class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
-     """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
-     - https://github.com/Jack000/glid-3-xl
-     - https://github.dev/crowsonkb/k-diffusion
-     """
-
-     def __init__(
-         self,
-         vae: AutoencoderKL,
-         text_encoder: CLIPTextModel,
-         clip_model: CLIPModel,
-         tokenizer: CLIPTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
-         feature_extractor: CLIPFeatureExtractor,
-     ):
-         super().__init__()
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             clip_model=clip_model,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             feature_extractor=feature_extractor,
-         )
-
-         self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
-         self.cut_out_size = (
-             feature_extractor.size
-             if isinstance(feature_extractor.size, int)
-             else feature_extractor.size["shortest_edge"]
-         )
-         self.make_cutouts = MakeCutouts(self.cut_out_size)
-
-         set_requires_grad(self.text_encoder, False)
-         set_requires_grad(self.clip_model, False)
-
-     def freeze_vae(self):
-         set_requires_grad(self.vae, False)
-
-     def unfreeze_vae(self):
-         set_requires_grad(self.vae, True)
-
-     def freeze_unet(self):
-         set_requires_grad(self.unet, False)
-
-     def unfreeze_unet(self):
-         set_requires_grad(self.unet, True)
-
-     def get_timesteps(self, num_inference_steps, strength, device):
-         # get the original timestep using init_timestep
-         init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
-
-         t_start = max(num_inference_steps - init_timestep, 0)
-         timesteps = self.scheduler.timesteps[t_start:]
-
-         return timesteps, num_inference_steps - t_start
-
-     def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
-         if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
-             raise ValueError(
-                 f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
-             )
-
-         image = image.to(device=device, dtype=dtype)
-
-         batch_size = batch_size * num_images_per_prompt
-         if isinstance(generator, list) and len(generator) != batch_size:
-             raise ValueError(
-                 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                 f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-             )
-
-         if isinstance(generator, list):
-             init_latents = [
-                 self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
-             ]
-             init_latents = torch.cat(init_latents, dim=0)
-         else:
-             init_latents = self.vae.encode(image).latent_dist.sample(generator)
-
-         init_latents = self.vae.config.scaling_factor * init_latents
-
-         if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
-             # expand init_latents for batch_size
-             deprecation_message = (
-                 f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
-                 " images (`image`). Initial images are now duplicated to match the number of text prompts. Note"
-                 " that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update"
-                 " your script to pass as many initial images as text prompts to suppress this warning."
-             )
-             deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
-             additional_image_per_prompt = batch_size // init_latents.shape[0]
-             init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
-         elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
-             raise ValueError(
-                 f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
-             )
-         else:
-             init_latents = torch.cat([init_latents], dim=0)
-
-         shape = init_latents.shape
-         noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-
-         # get latents
-         init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
-         latents = init_latents
-
-         return latents
-
-     @torch.enable_grad()
-     def cond_fn(
-         self,
-         latents,
-         timestep,
-         index,
-         text_embeddings,
-         noise_pred_original,
-         text_embeddings_clip,
-         clip_guidance_scale,
-         num_cutouts,
-         use_cutouts=True,
-     ):
-         latents = latents.detach().requires_grad_()
-
-         latent_model_input = self.scheduler.scale_model_input(latents, timestep)
-
-         # predict the noise residual
-         noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
-
-         if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
-             alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
-             beta_prod_t = 1 - alpha_prod_t
-             # compute predicted original sample from predicted noise also called
-             # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
-             pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
-
-             fac = torch.sqrt(beta_prod_t)
-             sample = pred_original_sample * (fac) + latents * (1 - fac)
-         elif isinstance(self.scheduler, LMSDiscreteScheduler):
-             sigma = self.scheduler.sigmas[index]
-             sample = latents - sigma * noise_pred
-         else:
-             raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
-
-         sample = 1 / self.vae.config.scaling_factor * sample
-         image = self.vae.decode(sample).sample
-         image = (image / 2 + 0.5).clamp(0, 1)
-
-         if use_cutouts:
-             image = self.make_cutouts(image, num_cutouts)
-         else:
-             image = transforms.Resize(self.cut_out_size)(image)
-         image = self.normalize(image).to(latents.dtype)
-
-         image_embeddings_clip = self.clip_model.get_image_features(image)
-         image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
-
-         if use_cutouts:
-             dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
-             dists = dists.view([num_cutouts, sample.shape[0], -1])
-             loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
-         else:
-             loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
-
-         grads = -torch.autograd.grad(loss, latents)[0]
-
-         if isinstance(self.scheduler, LMSDiscreteScheduler):
-             latents = latents.detach() + grads * (sigma**2)
-             noise_pred = noise_pred_original
-         else:
-             noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
-         return noise_pred, latents
-
-     @torch.no_grad()
-     def __call__(
-         self,
-         prompt: Union[str, List[str]],
-         height: Optional[int] = 512,
-         width: Optional[int] = 512,
-         image: Union[torch.Tensor, PIL.Image.Image] = None,
-         strength: float = 0.8,
-         num_inference_steps: Optional[int] = 50,
-         guidance_scale: Optional[float] = 7.5,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: float = 0.0,
-         clip_guidance_scale: Optional[float] = 100,
-         clip_prompt: Optional[Union[str, List[str]]] = None,
-         num_cutouts: Optional[int] = 4,
-         use_cutouts: Optional[bool] = True,
-         generator: Optional[torch.Generator] = None,
-         latents: Optional[torch.Tensor] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-     ):
-         if isinstance(prompt, str):
-             batch_size = 1
-         elif isinstance(prompt, list):
-             batch_size = len(prompt)
-         else:
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         if height % 8 != 0 or width % 8 != 0:
-             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-         # get prompt text embeddings
-         text_input = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=self.tokenizer.model_max_length,
-             truncation=True,
-             return_tensors="pt",
-         )
-         text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
-         # duplicate text embeddings for each generation per prompt
-         text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
-
-         # set timesteps
-         accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
-         extra_set_kwargs = {}
-         if accepts_offset:
-             extra_set_kwargs["offset"] = 1
-
-         self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
-         # Some schedulers like PNDM have timesteps as arrays
-         # It's more optimized to move all timesteps to correct device beforehand
-         self.scheduler.timesteps.to(self.device)
-
-         timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
-         latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
-
-         # Preprocess image
-         image = preprocess(image, width, height)
-         if latents is None:
-             latents = self.prepare_latents(
-                 image,
-                 latent_timestep,
-                 batch_size,
-                 num_images_per_prompt,
-                 text_embeddings.dtype,
-                 self.device,
-                 generator,
-             )
-
-         if clip_guidance_scale > 0:
-             if clip_prompt is not None:
-                 clip_text_input = self.tokenizer(
-                     clip_prompt,
-                     padding="max_length",
-                     max_length=self.tokenizer.model_max_length,
-                     truncation=True,
-                     return_tensors="pt",
-                 ).input_ids.to(self.device)
-             else:
-                 clip_text_input = text_input.input_ids.to(self.device)
-             text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
-             text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
-             # duplicate text embeddings clip for each generation per prompt
-             text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)
-
-         # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
-         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-         # corresponds to doing no classifier free guidance.
-         do_classifier_free_guidance = guidance_scale > 1.0
-         # get unconditional embeddings for classifier free guidance
-         if do_classifier_free_guidance:
-             max_length = text_input.input_ids.shape[-1]
-             uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
-             uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
-             # duplicate unconditional embeddings for each generation per prompt
-             uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
-
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
-         # get the initial random noise unless the user supplied it
-
-         # Unlike in other pipelines, latents need to be generated in the target device
-         # for 1-to-1 results reproducibility with the CompVis implementation.
-         # However this currently doesn't work in `mps`.
-         latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
-         latents_dtype = text_embeddings.dtype
-         if latents is None:
-             if self.device.type == "mps":
-                 # randn does not work reproducibly on mps
-                 latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
-                     self.device
-                 )
-             else:
-                 latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
-         else:
-             if latents.shape != latents_shape:
-                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
-             latents = latents.to(self.device)
-
-         # scale the initial noise by the standard deviation required by the scheduler
-         latents = latents * self.scheduler.init_noise_sigma
-
-         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-         # and should be between [0, 1]
-         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         extra_step_kwargs = {}
-         if accepts_eta:
-             extra_step_kwargs["eta"] = eta
-
-         # check if the scheduler accepts generator
-         accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         if accepts_generator:
-             extra_step_kwargs["generator"] = generator
-
-         with self.progress_bar(total=num_inference_steps):
-             for i, t in enumerate(timesteps):
-                 # expand the latents if we are doing classifier free guidance
-                 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
-                 # predict the noise residual
-                 noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
-
-                 # perform classifier free guidance
-                 if do_classifier_free_guidance:
-                     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-                 # perform clip guidance
-                 if clip_guidance_scale > 0:
-                     text_embeddings_for_guidance = (
-                         text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
-                     )
-                     noise_pred, latents = self.cond_fn(
-                         latents,
-                         t,
-                         i,
-                         text_embeddings_for_guidance,
-                         noise_pred,
-                         text_embeddings_clip,
-                         clip_guidance_scale,
-                         num_cutouts,
-                         use_cutouts,
-                     )
-
-                 # compute the previous noisy sample x_t -> x_t-1
-                 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
-         # scale and decode the image latents with vae
-         latents = 1 / self.vae.config.scaling_factor * latents
-         image = self.vae.decode(latents).sample
-
-         image = (image / 2 + 0.5).clamp(0, 1)
-         image = image.cpu().permute(0, 2, 3, 1).numpy()
-
-         if output_type == "pil":
-             image = self.numpy_to_pil(image)
-
-         if not return_dict:
-             return (image, None)
-
-         return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
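
For reference, the heart of the deleted pipeline is the CLIP-guidance step in `cond_fn`: the predicted clean image is encoded with CLIP and pushed toward the prompt's CLIP embedding via the gradient of a spherical distance loss on the latents. The snippet below is a minimal, self-contained sketch of that loss and gradient, not part of the deleted file; `fake_clip_proj` and the random tensors are illustrative stand-ins for the real CLIP image encoder and embeddings.

```py
import torch
import torch.nn.functional as F


def spherical_dist_loss(x, y):
    # squared great-circle distance between L2-normalized embeddings,
    # same helper as defined in the deleted file
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


# stand-ins: `latents` plays the role of the denoised latents, `fake_clip_proj`
# a CLIP image encoder, and `text_embeddings_clip` the prompt's CLIP features
latents = torch.randn(1, 4, 64, 64, requires_grad=True)
fake_clip_proj = torch.randn(4 * 64 * 64, 512)
image_embeddings_clip = latents.flatten(1) @ fake_clip_proj
text_embeddings_clip = torch.randn(1, 512)

clip_guidance_scale = 100
loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
# the pipeline folds this gradient into the noise prediction (or, for LMS, into
# the latents), scaled according to the scheduler, to steer sampling toward the prompt
grads = -torch.autograd.grad(loss, latents)[0]
print(grads.shape)  # torch.Size([1, 4, 64, 64])
```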