Wauplin committed
Commit 6b79d8d
1 Parent(s): f562984

Delete clip_guided_images_mixing_stable_diffusion.py

clip_guided_images_mixing_stable_diffusion.py DELETED
@@ -1,445 +0,0 @@
# -*- coding: utf-8 -*-
import inspect
from typing import Optional, Union

import numpy as np
import PIL.Image
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.torch_utils import randn_tensor


def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image

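
# slerp() below is spherical linear interpolation (Shoemake, 1985). With theta
# the angle between v0 and v1, the interpolated vector for t in [0, 1] is
#     slerp(t; v0, v1) = sin((1 - t) * theta) / sin(theta) * v0
#                      + sin(t * theta) / sin(theta) * v1
# When the inputs are nearly parallel (|dot| > DOT_THRESHOLD), sin(theta) is
# close to zero, so the code falls back to plain linear interpolation for
# numerical stability.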
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False  # initialize so the flag is defined for numpy inputs
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2

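
# spherical_dist_loss() below measures angular distance between embeddings:
# after normalization, ||x - y|| is the chord length 2 * sin(theta / 2), so
# arcsin(||x - y|| / 2) recovers theta / 2 and the function returns
# 2 * (theta / 2) ** 2, i.e. half the squared angle between x and y. This is
# the loss popularized by CLIP-guided diffusion notebooks.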
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)

def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value

class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

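    # The freeze_*/unfreeze_* helpers above toggle requires_grad on the VAE and
    # UNet parameters. CLIP guidance in cond_fn only needs gradients with
    # respect to the latents, so keeping both models frozen saves memory
    # without changing the result.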
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

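    # Example: with num_inference_steps=50 and strength=0.6, get_timesteps above
    # yields init_timestep = 30 and t_start = 20, so denoising runs over the
    # last 30 of the 50 scheduler timesteps; a higher strength adds more noise
    # and leaves more denoising steps to reshape the mixed latents.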
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

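    # The hardcoded 0.18215 above is the latent scaling factor of the original
    # Stable Diffusion VAE: encoded latents are multiplied by it so that they
    # have roughly unit variance before scheduler noise is added. Later
    # diffusers code reads this value from vae.config.scaling_factor instead.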
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

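    # Note: get_clip_image_embeddings above casts pixel values with .half(), so
    # it assumes the CLIP model was loaded in float16; a float32 setup would
    # need that cast removed.
    # cond_fn below is the CLIP guidance step: it predicts x_0 from the current
    # latents, decodes it with the VAE, embeds the result with CLIP, and shifts
    # the noise prediction along the gradient that pulls this embedding toward
    # the mixed content/style CLIP target.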
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise, also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

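    # __call__ below mixes the two input images in three spaces at once: their
    # VAE latents, their prompt text embeddings, and their CLIP image
    # embeddings are each blended with slerp() using the corresponding
    # slerp_*_style_strength weight, and the blend is then denoised under
    # classifier-free guidance plus CLIP guidance.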
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.Tensor, PIL.Image.Image],
        content_image: Union[torch.Tensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed batch_size={batch_size}, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

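        # In every slerp() mix here, t = 0 keeps the pure content representation
        # and t = 1 the pure style representation: the defaults blend mostly
        # style in latent space (0.8) but mostly content for the prompt and the
        # CLIP guidance target (0.1 each).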
        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
        self.scheduler.timesteps = self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # (note: `latents` is already set from the content/style mix above, so the
        # `latents is None` branch below is never taken in this pipeline)

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                progress_bar.update()
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
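
For context, this file was loaded through the diffusers custom_pipeline mechanism. The sketch below is illustrative rather than taken from this commit: the checkpoint names (CompVis/stable-diffusion-v1-4, laion/CLIP-ViT-B-32-laion2B-s34B-b79K, the CoCa weights) and the local image paths are assumptions, the CoCa models are only needed when prompts are not passed explicitly, and since this commit deletes the file, loading it by name would require pinning an older diffusers revision (e.g. via the custom_revision argument).

import PIL.Image
import torch
import open_clip
from open_clip import SimpleTokenizer
from diffusers import DiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPModel

# CLIP model used for guidance (assumed checkpoint)
feature_extractor = CLIPFeatureExtractor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")
clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)

# Optional CoCa captioner, used to auto-generate prompts when none are given
coca_model = open_clip.create_model("coca_ViT-L-14", pretrained="laion2B-s13B-b90k").to("cuda")
coca_model.dtype = torch.float16
coca_transform = open_clip.image_transform(
    coca_model.visual.image_size,
    is_train=False,
    mean=getattr(coca_model.visual, "image_mean", None),
    std=getattr(coca_model.visual, "image_std", None),
)
coca_tokenizer = SimpleTokenizer()

mixing_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_images_mixing_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    coca_model=coca_model,
    coca_tokenizer=coca_tokenizer,
    coca_transform=coca_transform,
    torch_dtype=torch.float16,
).to("cuda")

generator = torch.Generator(device="cuda").manual_seed(17)
content_image = PIL.Image.open("content.png").convert("RGB")  # assumed local files
style_image = PIL.Image.open("style.png").convert("RGB")

images = mixing_pipeline(
    content_image=content_image,
    style_image=style_image,
    num_inference_steps=50,
    noise_strength=0.65,
    slerp_latent_style_strength=0.9,
    slerp_prompt_style_strength=0.1,
    slerp_clip_image_style_strength=0.1,
    guidance_scale=9.0,
    clip_guidance_scale=100,
    generator=generator,
).images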