Wauplin (HF staff) committed
Commit 628614e
1 Parent(s): af67792
This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. composable_stable_diffusion.py +0 -532
  2. ddim_noise_comparative_analysis.py +0 -190
  3. dps_pipeline.py +0 -466
  4. edict_pipeline.py +0 -264
  5. fresco_v2v.py +0 -0
  6. gluegen.py +0 -816
  7. hd_painter.py +0 -994
  8. iadb.py +0 -149
  9. imagic_stable_diffusion.py +0 -470
  10. img2img_inpainting.py +0 -437
  11. instaflow_one_step.py +0 -685
  12. interpolate_stable_diffusion.py +0 -498
  13. ip_adapter_face_id.py +0 -1125
  14. kohya_hires_fix.py +0 -468
  15. latent_consistency_img2img.py +0 -821
  16. latent_consistency_interpolate.py +0 -995
  17. latent_consistency_txt2img.py +0 -729
  18. llm_grounded_diffusion.py +0 -1558
  19. lpw_stable_diffusion.py +0 -1371
  20. lpw_stable_diffusion_onnx.py +0 -1148
  21. lpw_stable_diffusion_xl.py +0 -0
  22. magic_mix.py +0 -152
  23. marigold_depth_estimation.py +0 -673
  24. masked_stable_diffusion_img2img.py +0 -262
  25. mixture_canvas.py +0 -501
  26. mixture_tiling.py +0 -405
  27. multilingual_stable_diffusion.py +0 -410
  28. one_step_unet.py +0 -24
  29. pipeline_animatediff_controlnet.py +0 -1125
  30. pipeline_animatediff_img2video.py +0 -980
  31. pipeline_demofusion_sdxl.py +0 -1392
  32. pipeline_fabric.py +0 -751
  33. pipeline_null_text_inversion.py +0 -260
  34. pipeline_prompt2prompt.py +0 -1422
  35. pipeline_sdxl_style_aligned.py +0 -1916
  36. pipeline_stable_diffusion_boxdiff.py +0 -1700
  37. pipeline_stable_diffusion_pag.py +0 -1471
  38. pipeline_stable_diffusion_upscale_ldm3d.py +0 -772
  39. pipeline_stable_diffusion_xl_controlnet_adapter.py +0 -1411
  40. pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py +0 -1850
  41. pipeline_stable_diffusion_xl_differential_img2img.py +0 -1470
  42. pipeline_stable_diffusion_xl_instandid_img2img.py +0 -1077
  43. pipeline_stable_diffusion_xl_instantid.py +0 -1066
  44. pipeline_stable_diffusion_xl_ipex.py +0 -1434
  45. pipeline_zero1to3.py +0 -793
  46. regional_prompting_stable_diffusion.py +0 -620
  47. rerender_a_video.py +0 -1194
  48. run_onnx_controlnet.py +0 -911
  49. run_tensorrt_controlnet.py +0 -1022
  50. scheduling_ufogen.py +0 -521
composable_stable_diffusion.py DELETED
@@ -1,532 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Callable, List, Optional, Union
17
-
18
- import torch
19
- from packaging import version
20
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
21
-
22
- from diffusers import DiffusionPipeline
23
- from diffusers.configuration_utils import FrozenDict
24
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
25
- from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
26
- from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
27
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
28
- from diffusers.schedulers import (
29
- DDIMScheduler,
30
- DPMSolverMultistepScheduler,
31
- EulerAncestralDiscreteScheduler,
32
- EulerDiscreteScheduler,
33
- LMSDiscreteScheduler,
34
- PNDMScheduler,
35
- )
36
- from diffusers.utils import deprecate, logging
37
-
38
-
39
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
40
-
41
-
42
- class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
43
- r"""
44
- Pipeline for text-to-image generation using Stable Diffusion.
45
-
46
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
47
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
48
-
49
- Args:
50
- vae ([`AutoencoderKL`]):
51
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
52
- text_encoder ([`CLIPTextModel`]):
53
- Frozen text-encoder. Stable Diffusion uses the text portion of
54
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
55
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
56
- tokenizer (`CLIPTokenizer`):
57
- Tokenizer of class
58
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
59
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
60
- scheduler ([`SchedulerMixin`]):
61
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
62
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
63
- safety_checker ([`StableDiffusionSafetyChecker`]):
64
- Classification module that estimates whether generated images could be considered offensive or harmful.
65
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
66
- feature_extractor ([`CLIPImageProcessor`]):
67
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
68
- """
69
-
70
- _optional_components = ["safety_checker", "feature_extractor"]
71
-
72
- def __init__(
73
- self,
74
- vae: AutoencoderKL,
75
- text_encoder: CLIPTextModel,
76
- tokenizer: CLIPTokenizer,
77
- unet: UNet2DConditionModel,
78
- scheduler: Union[
79
- DDIMScheduler,
80
- PNDMScheduler,
81
- LMSDiscreteScheduler,
82
- EulerDiscreteScheduler,
83
- EulerAncestralDiscreteScheduler,
84
- DPMSolverMultistepScheduler,
85
- ],
86
- safety_checker: StableDiffusionSafetyChecker,
87
- feature_extractor: CLIPImageProcessor,
88
- requires_safety_checker: bool = True,
89
- ):
90
- super().__init__()
91
-
92
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
93
- deprecation_message = (
94
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
95
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
96
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
97
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
98
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
99
- " file"
100
- )
101
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
102
- new_config = dict(scheduler.config)
103
- new_config["steps_offset"] = 1
104
- scheduler._internal_dict = FrozenDict(new_config)
105
-
106
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
107
- deprecation_message = (
108
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
109
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
110
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
111
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
112
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
113
- )
114
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
115
- new_config = dict(scheduler.config)
116
- new_config["clip_sample"] = False
117
- scheduler._internal_dict = FrozenDict(new_config)
118
-
119
- if safety_checker is None and requires_safety_checker:
120
- logger.warning(
121
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
122
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
123
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
124
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
125
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
126
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
127
- )
128
-
129
- if safety_checker is not None and feature_extractor is None:
130
- raise ValueError(
131
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
132
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
133
- )
134
-
135
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
136
- version.parse(unet.config._diffusers_version).base_version
137
- ) < version.parse("0.9.0.dev0")
138
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
139
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
140
- deprecation_message = (
141
- "The configuration file of the unet has set the default `sample_size` to smaller than"
142
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
143
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
144
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
145
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
146
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
147
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
148
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
149
- " the `unet/config.json` file"
150
- )
151
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
152
- new_config = dict(unet.config)
153
- new_config["sample_size"] = 64
154
- unet._internal_dict = FrozenDict(new_config)
155
-
156
- self.register_modules(
157
- vae=vae,
158
- text_encoder=text_encoder,
159
- tokenizer=tokenizer,
160
- unet=unet,
161
- scheduler=scheduler,
162
- safety_checker=safety_checker,
163
- feature_extractor=feature_extractor,
164
- )
165
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
166
- self.register_to_config(requires_safety_checker=requires_safety_checker)
167
-
168
- def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
169
- r"""
170
- Encodes the prompt into text encoder hidden states.
171
-
172
- Args:
173
- prompt (`str` or `list(int)`):
174
- prompt to be encoded
175
- device: (`torch.device`):
176
- torch device
177
- num_images_per_prompt (`int`):
178
- number of images that should be generated per prompt
179
- do_classifier_free_guidance (`bool`):
180
- whether to use classifier free guidance or not
181
- negative_prompt (`str` or `List[str]`):
182
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
183
- if `guidance_scale` is less than `1`).
184
- """
185
- batch_size = len(prompt) if isinstance(prompt, list) else 1
186
-
187
- text_inputs = self.tokenizer(
188
- prompt,
189
- padding="max_length",
190
- max_length=self.tokenizer.model_max_length,
191
- truncation=True,
192
- return_tensors="pt",
193
- )
194
- text_input_ids = text_inputs.input_ids
195
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
196
-
197
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
198
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
199
- logger.warning(
200
- "The following part of your input was truncated because CLIP can only handle sequences up to"
201
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
202
- )
203
-
204
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
205
- attention_mask = text_inputs.attention_mask.to(device)
206
- else:
207
- attention_mask = None
208
-
209
- text_embeddings = self.text_encoder(
210
- text_input_ids.to(device),
211
- attention_mask=attention_mask,
212
- )
213
- text_embeddings = text_embeddings[0]
214
-
215
- # duplicate text embeddings for each generation per prompt, using mps friendly method
216
- bs_embed, seq_len, _ = text_embeddings.shape
217
- text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
218
- text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
219
-
220
- # get unconditional embeddings for classifier free guidance
221
- if do_classifier_free_guidance:
222
- uncond_tokens: List[str]
223
- if negative_prompt is None:
224
- uncond_tokens = [""] * batch_size
225
- elif type(prompt) is not type(negative_prompt):
226
- raise TypeError(
227
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
228
- f" {type(prompt)}."
229
- )
230
- elif isinstance(negative_prompt, str):
231
- uncond_tokens = [negative_prompt]
232
- elif batch_size != len(negative_prompt):
233
- raise ValueError(
234
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
235
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
236
- " the batch size of `prompt`."
237
- )
238
- else:
239
- uncond_tokens = negative_prompt
240
-
241
- max_length = text_input_ids.shape[-1]
242
- uncond_input = self.tokenizer(
243
- uncond_tokens,
244
- padding="max_length",
245
- max_length=max_length,
246
- truncation=True,
247
- return_tensors="pt",
248
- )
249
-
250
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
251
- attention_mask = uncond_input.attention_mask.to(device)
252
- else:
253
- attention_mask = None
254
-
255
- uncond_embeddings = self.text_encoder(
256
- uncond_input.input_ids.to(device),
257
- attention_mask=attention_mask,
258
- )
259
- uncond_embeddings = uncond_embeddings[0]
260
-
261
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
262
- seq_len = uncond_embeddings.shape[1]
263
- uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
264
- uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
265
-
266
- # For classifier free guidance, we need to do two forward passes.
267
- # Here we concatenate the unconditional and text embeddings into a single batch
268
- # to avoid doing two forward passes
269
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
270
-
271
- return text_embeddings
272
-
273
- def run_safety_checker(self, image, device, dtype):
274
- if self.safety_checker is not None:
275
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
276
- image, has_nsfw_concept = self.safety_checker(
277
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
278
- )
279
- else:
280
- has_nsfw_concept = None
281
- return image, has_nsfw_concept
282
-
283
- def decode_latents(self, latents):
284
- latents = 1 / 0.18215 * latents
285
- image = self.vae.decode(latents).sample
286
- image = (image / 2 + 0.5).clamp(0, 1)
287
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
288
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
289
- return image
290
-
291
- def prepare_extra_step_kwargs(self, generator, eta):
292
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
293
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
294
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
295
- # and should be between [0, 1]
296
-
297
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
298
- extra_step_kwargs = {}
299
- if accepts_eta:
300
- extra_step_kwargs["eta"] = eta
301
-
302
- # check if the scheduler accepts generator
303
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
304
- if accepts_generator:
305
- extra_step_kwargs["generator"] = generator
306
- return extra_step_kwargs
307
-
308
- def check_inputs(self, prompt, height, width, callback_steps):
309
- if not isinstance(prompt, str) and not isinstance(prompt, list):
310
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
311
-
312
- if height % 8 != 0 or width % 8 != 0:
313
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
314
-
315
- if (callback_steps is None) or (
316
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
317
- ):
318
- raise ValueError(
319
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
320
- f" {type(callback_steps)}."
321
- )
322
-
323
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
324
- shape = (
325
- batch_size,
326
- num_channels_latents,
327
- int(height) // self.vae_scale_factor,
328
- int(width) // self.vae_scale_factor,
329
- )
330
- if latents is None:
331
- if device.type == "mps":
332
- # randn does not work reproducibly on mps
333
- latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
334
- else:
335
- latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
336
- else:
337
- if latents.shape != shape:
338
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
339
- latents = latents.to(device)
340
-
341
- # scale the initial noise by the standard deviation required by the scheduler
342
- latents = latents * self.scheduler.init_noise_sigma
343
- return latents
344
-
345
- @torch.no_grad()
346
- def __call__(
347
- self,
348
- prompt: Union[str, List[str]],
349
- height: Optional[int] = None,
350
- width: Optional[int] = None,
351
- num_inference_steps: int = 50,
352
- guidance_scale: float = 7.5,
353
- negative_prompt: Optional[Union[str, List[str]]] = None,
354
- num_images_per_prompt: Optional[int] = 1,
355
- eta: float = 0.0,
356
- generator: Optional[torch.Generator] = None,
357
- latents: Optional[torch.Tensor] = None,
358
- output_type: Optional[str] = "pil",
359
- return_dict: bool = True,
360
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
361
- callback_steps: int = 1,
362
- weights: Optional[str] = "",
363
- ):
364
- r"""
365
- Function invoked when calling the pipeline for generation.
366
-
367
- Args:
368
- prompt (`str` or `List[str]`):
369
- The prompt or prompts to guide the image generation.
370
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
371
- The height in pixels of the generated image.
372
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
373
- The width in pixels of the generated image.
374
- num_inference_steps (`int`, *optional*, defaults to 50):
375
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
376
- expense of slower inference.
377
- guidance_scale (`float`, *optional*, defaults to 7.5):
378
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
379
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
380
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
381
- 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
382
- usually at the expense of lower image quality.
383
- negative_prompt (`str` or `List[str]`, *optional*):
384
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
385
- if `guidance_scale` is less than `1`).
386
- num_images_per_prompt (`int`, *optional*, defaults to 1):
387
- The number of images to generate per prompt.
388
- eta (`float`, *optional*, defaults to 0.0):
389
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
390
- [`schedulers.DDIMScheduler`], will be ignored for others.
391
- generator (`torch.Generator`, *optional*):
392
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
393
- deterministic.
394
- latents (`torch.Tensor`, *optional*):
395
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
396
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
397
- tensor will be generated by sampling using the supplied random `generator`.
398
- output_type (`str`, *optional*, defaults to `"pil"`):
399
- The output format of the generated image. Choose between
400
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
401
- return_dict (`bool`, *optional*, defaults to `True`):
402
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
403
- plain tuple.
404
- callback (`Callable`, *optional*):
405
- A function that will be called every `callback_steps` steps during inference. The function will be
406
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
407
- callback_steps (`int`, *optional*, defaults to 1):
408
- The frequency at which the `callback` function will be called. If not specified, the callback will be
409
- called at every step.
410
-
411
- Returns:
412
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
413
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
414
- When returning a tuple, the first element is a list with the generated images, and the second element is a
415
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
416
- (nsfw) content, according to the `safety_checker`.
417
- """
418
- # 0. Default height and width to unet
419
- height = height or self.unet.config.sample_size * self.vae_scale_factor
420
- width = width or self.unet.config.sample_size * self.vae_scale_factor
421
-
422
- # 1. Check inputs. Raise error if not correct
423
- self.check_inputs(prompt, height, width, callback_steps)
424
-
425
- # 2. Define call parameters
426
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
427
- device = self._execution_device
428
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
429
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
430
- # corresponds to doing no classifier free guidance.
431
- do_classifier_free_guidance = guidance_scale > 1.0
432
-
433
- if "|" in prompt:
434
- prompt = [x.strip() for x in prompt.split("|")]
435
- print(f"composing {prompt}...")
436
-
437
- if not weights:
438
- # specify weights for prompts (excluding the unconditional score)
439
- print("using equal positive weights (conjunction) for all prompts...")
440
- weights = torch.tensor([guidance_scale] * len(prompt), device=self.device).reshape(-1, 1, 1, 1)
441
- else:
442
- # set prompt weight for each
443
- num_prompts = len(prompt) if isinstance(prompt, list) else 1
444
- weights = [float(w.strip()) for w in weights.split("|")]
445
- # guidance scale as the default
446
- if len(weights) < num_prompts:
447
- weights.append(guidance_scale)
448
- else:
449
- weights = weights[:num_prompts]
450
- assert len(weights) == len(prompt), "weights specified are not equal to the number of prompts"
451
- weights = torch.tensor(weights, device=self.device).reshape(-1, 1, 1, 1)
452
- else:
453
- weights = guidance_scale
454
-
455
- # 3. Encode input prompt
456
- text_embeddings = self._encode_prompt(
457
- prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
458
- )
459
-
460
- # 4. Prepare timesteps
461
- self.scheduler.set_timesteps(num_inference_steps, device=device)
462
- timesteps = self.scheduler.timesteps
463
-
464
- # 5. Prepare latent variables
465
- num_channels_latents = self.unet.config.in_channels
466
- latents = self.prepare_latents(
467
- batch_size * num_images_per_prompt,
468
- num_channels_latents,
469
- height,
470
- width,
471
- text_embeddings.dtype,
472
- device,
473
- generator,
474
- latents,
475
- )
476
-
477
- # composable diffusion
478
- if isinstance(prompt, list) and batch_size == 1:
479
- # remove extra unconditional embedding
480
- # N = one unconditional embed + conditional embeds
481
- text_embeddings = text_embeddings[len(prompt) - 1 :]
482
-
483
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
484
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
485
-
486
- # 7. Denoising loop
487
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
488
- with self.progress_bar(total=num_inference_steps) as progress_bar:
489
- for i, t in enumerate(timesteps):
490
- # expand the latents if we are doing classifier free guidance
491
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
492
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
493
-
494
- # predict the noise residual
495
- noise_pred = []
496
- for j in range(text_embeddings.shape[0]):
497
- noise_pred.append(
498
- self.unet(latent_model_input[:1], t, encoder_hidden_states=text_embeddings[j : j + 1]).sample
499
- )
500
- noise_pred = torch.cat(noise_pred, dim=0)
501
-
502
- # perform guidance
503
- if do_classifier_free_guidance:
504
- noise_pred_uncond, noise_pred_text = noise_pred[:1], noise_pred[1:]
505
- noise_pred = noise_pred_uncond + (weights * (noise_pred_text - noise_pred_uncond)).sum(
506
- dim=0, keepdims=True
507
- )
508
-
509
- # compute the previous noisy sample x_t -> x_t-1
510
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
511
-
512
- # call the callback, if provided
513
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
514
- progress_bar.update()
515
- if callback is not None and i % callback_steps == 0:
516
- step_idx = i // getattr(self.scheduler, "order", 1)
517
- callback(step_idx, t, latents)
518
-
519
- # 8. Post-processing
520
- image = self.decode_latents(latents)
521
-
522
- # 9. Run safety checker
523
- image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
524
-
525
- # 10. Convert to PIL
526
- if output_type == "pil":
527
- image = self.numpy_to_pil(image)
528
-
529
- if not return_dict:
530
- return (image, has_nsfw_concept)
531
-
532
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
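For reference, a minimal usage sketch of the `composable_stable_diffusion` community pipeline removed above (a sketch only: it assumes the pipeline is loaded through `DiffusionPipeline.from_pretrained(..., custom_pipeline=...)` with a Stable Diffusion v1 checkpoint, and the prompt, weights, and output path are illustrative):

```py
import torch

from diffusers import DiffusionPipeline

# Sketch: load the community pipeline defined above by its custom_pipeline name.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="composable_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Sub-prompts are separated by "|"; `weights` assigns one guidance weight per sub-prompt,
# and a negative weight steers the image away from that concept.
prompt = "mystical trees | A magical pond | dark"
weights = "7.5 | 7.5 | -7.5"
image = pipe(prompt, weights=weights, num_inference_steps=50).images[0]
image.save("composed.png")
```

Each sub-prompt gets its own noise prediction, and the weighted predictions are summed around the unconditional prediction before the scheduler step, which is what the composable-diffusion loop in `__call__` above implements.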
ddim_noise_comparative_analysis.py DELETED
@@ -1,190 +0,0 @@
1
- # Copyright 2022 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from typing import List, Optional, Tuple, Union
16
-
17
- import PIL.Image
18
- import torch
19
- from torchvision import transforms
20
-
21
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
22
- from diffusers.schedulers import DDIMScheduler
23
- from diffusers.utils.torch_utils import randn_tensor
24
-
25
-
26
- trans = transforms.Compose(
27
- [
28
- transforms.Resize((256, 256)),
29
- transforms.ToTensor(),
30
- transforms.Normalize([0.5], [0.5]),
31
- ]
32
- )
33
-
34
-
35
- def preprocess(image):
36
- if isinstance(image, torch.Tensor):
37
- return image
38
- elif isinstance(image, PIL.Image.Image):
39
- image = [image]
40
-
41
- image = [trans(img.convert("RGB")) for img in image]
42
- image = torch.stack(image)
43
- return image
44
-
45
-
46
- class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
47
- r"""
48
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
49
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
50
-
51
- Parameters:
52
- unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
53
- scheduler ([`SchedulerMixin`]):
54
- A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
55
- [`DDPMScheduler`], or [`DDIMScheduler`].
56
- """
57
-
58
- def __init__(self, unet, scheduler):
59
- super().__init__()
60
-
61
- # make sure scheduler can always be converted to DDIM
62
- scheduler = DDIMScheduler.from_config(scheduler.config)
63
-
64
- self.register_modules(unet=unet, scheduler=scheduler)
65
-
66
- def check_inputs(self, strength):
67
- if strength < 0 or strength > 1:
68
- raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
69
-
70
- def get_timesteps(self, num_inference_steps, strength, device):
71
- # get the original timestep using init_timestep
72
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
73
-
74
- t_start = max(num_inference_steps - init_timestep, 0)
75
- timesteps = self.scheduler.timesteps[t_start:]
76
-
77
- return timesteps, num_inference_steps - t_start
78
-
79
- def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
80
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
81
- raise ValueError(
82
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
83
- )
84
-
85
- init_latents = image.to(device=device, dtype=dtype)
86
-
87
- if isinstance(generator, list) and len(generator) != batch_size:
88
- raise ValueError(
89
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
90
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
91
- )
92
-
93
- shape = init_latents.shape
94
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
95
-
96
- # get latents
97
- print("add noise to latents at timestep", timestep)
98
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
99
- latents = init_latents
100
-
101
- return latents
102
-
103
- @torch.no_grad()
104
- def __call__(
105
- self,
106
- image: Union[torch.Tensor, PIL.Image.Image] = None,
107
- strength: float = 0.8,
108
- batch_size: int = 1,
109
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
110
- eta: float = 0.0,
111
- num_inference_steps: int = 50,
112
- use_clipped_model_output: Optional[bool] = None,
113
- output_type: Optional[str] = "pil",
114
- return_dict: bool = True,
115
- ) -> Union[ImagePipelineOutput, Tuple]:
116
- r"""
117
- Args:
118
- image (`torch.Tensor` or `PIL.Image.Image`):
119
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
120
- process.
121
- strength (`float`, *optional*, defaults to 0.8):
122
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
123
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
124
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
125
- be maximum and the denoising process will run for the full number of iterations specified in
126
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
127
- batch_size (`int`, *optional*, defaults to 1):
128
- The number of images to generate.
129
- generator (`torch.Generator`, *optional*):
130
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
131
- to make generation deterministic.
132
- eta (`float`, *optional*, defaults to 0.0):
133
- The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
134
- num_inference_steps (`int`, *optional*, defaults to 50):
135
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
136
- expense of slower inference.
137
- use_clipped_model_output (`bool`, *optional*, defaults to `None`):
138
- if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed
139
- downstream to the scheduler. So use `None` for schedulers which don't support this argument.
140
- output_type (`str`, *optional*, defaults to `"pil"`):
141
- The output format of the generate image. Choose between
142
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
143
- return_dict (`bool`, *optional*, defaults to `True`):
144
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
145
-
146
- Returns:
147
- [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
148
- True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
149
- """
150
- # 1. Check inputs. Raise error if not correct
151
- self.check_inputs(strength)
152
-
153
- # 2. Preprocess image
154
- image = preprocess(image)
155
-
156
- # 3. set timesteps
157
- self.scheduler.set_timesteps(num_inference_steps, device=self.device)
158
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
159
- latent_timestep = timesteps[:1].repeat(batch_size)
160
-
161
- # 4. Prepare latent variables
162
- latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
163
- image = latents
164
-
165
- # 5. Denoising loop
166
- for t in self.progress_bar(timesteps):
167
- # 1. predict noise model_output
168
- model_output = self.unet(image, t).sample
169
-
170
- # 2. predict previous mean of image x_t-1 and add variance depending on eta
171
- # eta corresponds to η in paper and should be between [0, 1]
172
- # do x_t -> x_t-1
173
- image = self.scheduler.step(
174
- model_output,
175
- t,
176
- image,
177
- eta=eta,
178
- use_clipped_model_output=use_clipped_model_output,
179
- generator=generator,
180
- ).prev_sample
181
-
182
- image = (image / 2 + 0.5).clamp(0, 1)
183
- image = image.cpu().permute(0, 2, 3, 1).numpy()
184
- if output_type == "pil":
185
- image = self.numpy_to_pil(image)
186
-
187
- if not return_dict:
188
- return (image, latent_timestep.item())
189
-
190
- return ImagePipelineOutput(images=image)
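For reference, a minimal usage sketch of the `ddim_noise_comparative_analysis` pipeline removed above (a sketch only: it assumes an unconditional DDPM checkpoint such as `google/ddpm-celebahq-256`, which is also used in the DPS example further below; the input and output file names are illustrative):

```py
from PIL import Image

from diffusers import DiffusionPipeline

# Sketch: re-noise the same input to different strengths and denoise it back with DDIM.
pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-celebahq-256",
    custom_pipeline="ddim_noise_comparative_analysis",
)
init_image = Image.open("input.png").convert("RGB")

for strength in (0.1, 0.5, 0.9):
    # With return_dict=False the pipeline returns (images, noising_timestep).
    images, noising_timestep = pipe(init_image, strength=strength, return_dict=False)
    images[0].save(f"reconstructed_strength_{strength}.png")
```

Higher `strength` injects noise at an earlier timestep, so comparing the reconstructions shows how much of the original image survives each noise level.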
dps_pipeline.py DELETED
@@ -1,466 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- from math import pi
17
- from typing import Callable, List, Optional, Tuple, Union
18
-
19
- import numpy as np
20
- import torch
21
- from PIL import Image
22
-
23
- from diffusers import DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DModel
24
- from diffusers.utils.torch_utils import randn_tensor
25
-
26
-
27
- class DPSPipeline(DiffusionPipeline):
28
- r"""
29
- Pipeline for Diffusion Posterior Sampling.
30
-
31
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
32
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
33
-
34
- Parameters:
35
- unet ([`UNet2DModel`]):
36
- A `UNet2DModel` to denoise the encoded image latents.
37
- scheduler ([`SchedulerMixin`]):
38
- A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
39
- [`DDPMScheduler`], or [`DDIMScheduler`].
40
- """
41
-
42
- model_cpu_offload_seq = "unet"
43
-
44
- def __init__(self, unet, scheduler):
45
- super().__init__()
46
- self.register_modules(unet=unet, scheduler=scheduler)
47
-
48
- @torch.no_grad()
49
- def __call__(
50
- self,
51
- measurement: torch.Tensor,
52
- operator: torch.nn.Module,
53
- loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
54
- batch_size: int = 1,
55
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
56
- num_inference_steps: int = 1000,
57
- output_type: Optional[str] = "pil",
58
- return_dict: bool = True,
59
- zeta: float = 0.3,
60
- ) -> Union[ImagePipelineOutput, Tuple]:
61
- r"""
62
- The call function to the pipeline for generation.
63
-
64
- Args:
65
- measurement (`torch.Tensor`, *required*):
66
- A 'torch.Tensor', the corrupted image
67
- operator (`torch.nn.Module`, *required*):
68
- A 'torch.nn.Module', the operator generating the corrupted image
69
- loss_fn (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *required*):
70
- A 'Callable[[torch.Tensor, torch.Tensor], torch.Tensor]', the loss function used
71
- between the measurements; for most cases, using RMSE is fine.
72
- batch_size (`int`, *optional*, defaults to 1):
73
- The number of images to generate.
74
- generator (`torch.Generator`, *optional*):
75
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
76
- generation deterministic.
77
- num_inference_steps (`int`, *optional*, defaults to 1000):
78
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
79
- expense of slower inference.
80
- output_type (`str`, *optional*, defaults to `"pil"`):
81
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
82
- return_dict (`bool`, *optional*, defaults to `True`):
83
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
84
-
85
- Example:
86
-
87
- ```py
88
- >>> from diffusers import DDPMPipeline
89
-
90
- >>> # load model and scheduler
91
- >>> pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
92
-
93
- >>> # run pipeline in inference (sample random noise and denoise)
94
- >>> image = pipe().images[0]
95
-
96
- >>> # save image
97
- >>> image.save("ddpm_generated_image.png")
98
- ```
99
-
100
- Returns:
101
- [`~pipelines.ImagePipelineOutput`] or `tuple`:
102
- If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
103
- returned where the first element is a list with the generated images
104
- """
105
- # Sample gaussian noise to begin loop
106
- if isinstance(self.unet.config.sample_size, int):
107
- image_shape = (
108
- batch_size,
109
- self.unet.config.in_channels,
110
- self.unet.config.sample_size,
111
- self.unet.config.sample_size,
112
- )
113
- else:
114
- image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
115
-
116
- if self.device.type == "mps":
117
- # randn does not work reproducibly on mps
118
- image = randn_tensor(image_shape, generator=generator)
119
- image = image.to(self.device)
120
- else:
121
- image = randn_tensor(image_shape, generator=generator, device=self.device)
122
-
123
- # set step values
124
- self.scheduler.set_timesteps(num_inference_steps)
125
-
126
- for t in self.progress_bar(self.scheduler.timesteps):
127
- with torch.enable_grad():
128
- # 1. predict noise model_output
129
- image = image.requires_grad_()
130
- model_output = self.unet(image, t).sample
131
-
132
- # 2. compute previous image x'_{t-1} and original prediction x0_{t}
133
- scheduler_out = self.scheduler.step(model_output, t, image, generator=generator)
134
- image_pred, origi_pred = scheduler_out.prev_sample, scheduler_out.pred_original_sample
135
-
136
- # 3. compute y'_t = f(x0_{t})
137
- measurement_pred = operator(origi_pred)
138
-
139
- # 4. compute loss = d(y, y'_t-1)
140
- loss = loss_fn(measurement, measurement_pred)
141
- loss.backward()
142
-
143
- print("distance: {0:.4f}".format(loss.item()))
144
-
145
- with torch.no_grad():
146
- image_pred = image_pred - zeta * image.grad
147
- image = image_pred.detach()
148
-
149
- image = (image / 2 + 0.5).clamp(0, 1)
150
- image = image.cpu().permute(0, 2, 3, 1).numpy()
151
- if output_type == "pil":
152
- image = self.numpy_to_pil(image)
153
-
154
- if not return_dict:
155
- return (image,)
156
-
157
- return ImagePipelineOutput(images=image)
158
-
159
-
160
- if __name__ == "__main__":
161
- import scipy
162
- from torch import nn
163
- from torchvision.utils import save_image
164
-
165
- # defining the operators f(.) of y = f(x)
166
- # super-resolution operator
167
- class SuperResolutionOperator(nn.Module):
168
- def __init__(self, in_shape, scale_factor):
169
- super().__init__()
170
-
171
- # Resizer local class, do not use outside the SR operator class
172
- class Resizer(nn.Module):
173
- def __init__(self, in_shape, scale_factor=None, output_shape=None, kernel=None, antialiasing=True):
174
- super(Resizer, self).__init__()
175
-
176
- # First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa
177
- scale_factor, output_shape = self.fix_scale_and_size(in_shape, output_shape, scale_factor)
178
-
179
- # Choose interpolation method, each method has the matching kernel size
180
- def cubic(x):
181
- absx = np.abs(x)
182
- absx2 = absx**2
183
- absx3 = absx**3
184
- return (1.5 * absx3 - 2.5 * absx2 + 1) * (absx <= 1) + (
185
- -0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2
186
- ) * ((1 < absx) & (absx <= 2))
187
-
188
- def lanczos2(x):
189
- return (
190
- (np.sin(pi * x) * np.sin(pi * x / 2) + np.finfo(np.float32).eps)
191
- / ((pi**2 * x**2 / 2) + np.finfo(np.float32).eps)
192
- ) * (abs(x) < 2)
193
-
194
- def box(x):
195
- return ((-0.5 <= x) & (x < 0.5)) * 1.0
196
-
197
- def lanczos3(x):
198
- return (
199
- (np.sin(pi * x) * np.sin(pi * x / 3) + np.finfo(np.float32).eps)
200
- / ((pi**2 * x**2 / 3) + np.finfo(np.float32).eps)
201
- ) * (abs(x) < 3)
202
-
203
- def linear(x):
204
- return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
205
-
206
- method, kernel_width = {
207
- "cubic": (cubic, 4.0),
208
- "lanczos2": (lanczos2, 4.0),
209
- "lanczos3": (lanczos3, 6.0),
210
- "box": (box, 1.0),
211
- "linear": (linear, 2.0),
212
- None: (cubic, 4.0), # set default interpolation method as cubic
213
- }.get(kernel)
214
-
215
- # Antialiasing is only used when downscaling
216
- antialiasing *= np.any(np.array(scale_factor) < 1)
217
-
218
- # Sort indices of dimensions according to scale of each dimension. since we are going dim by dim this is efficient
219
- sorted_dims = np.argsort(np.array(scale_factor))
220
- self.sorted_dims = [int(dim) for dim in sorted_dims if scale_factor[dim] != 1]
221
-
222
- # Iterate over dimensions to calculate local weights for resizing and resize each time in one direction
223
- field_of_view_list = []
224
- weights_list = []
225
- for dim in self.sorted_dims:
226
- # for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the
227
- # weights that multiply the values there to get its result.
228
- weights, field_of_view = self.contributions(
229
- in_shape[dim], output_shape[dim], scale_factor[dim], method, kernel_width, antialiasing
230
- )
231
-
232
- # convert to torch tensor
233
- weights = torch.tensor(weights.T, dtype=torch.float32)
234
-
235
- # We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for
236
- # tmp_im[field_of_view.T], (bsxfun style)
237
- weights_list.append(
238
- nn.Parameter(
239
- torch.reshape(weights, list(weights.shape) + (len(scale_factor) - 1) * [1]),
240
- requires_grad=False,
241
- )
242
- )
243
- field_of_view_list.append(
244
- nn.Parameter(
245
- torch.tensor(field_of_view.T.astype(np.int32), dtype=torch.long), requires_grad=False
246
- )
247
- )
248
-
249
- self.field_of_view = nn.ParameterList(field_of_view_list)
250
- self.weights = nn.ParameterList(weights_list)
251
-
252
- def forward(self, in_tensor):
253
- x = in_tensor
254
-
255
- # Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim
256
- for dim, fov, w in zip(self.sorted_dims, self.field_of_view, self.weights):
257
- # To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize
258
- x = torch.transpose(x, dim, 0)
259
-
260
- # This is a bit of a complicated multiplication: x[field_of_view.T] is a tensor of order image_dims+1.
261
- # for each pixel in the output-image it matches the positions the influence it from the input image (along 1 dim
262
- # only, this is why it only adds 1 dim to the shape). We then multiply, for each pixel, its set of positions with
263
- # the matching set of weights. we do this by this big tensor element-wise multiplication (MATLAB bsxfun style:
264
- # matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the
265
- # same number
266
- x = torch.sum(x[fov] * w, dim=0)
267
-
268
- # Finally we swap back the axes to the original order
269
- x = torch.transpose(x, dim, 0)
270
-
271
- return x
272
-
273
- def fix_scale_and_size(self, input_shape, output_shape, scale_factor):
274
- # First fixing the scale-factor (if given) to be standardized the function expects (a list of scale factors in the
275
- # same size as the number of input dimensions)
276
- if scale_factor is not None:
277
- # By default, if scale-factor is a scalar we assume 2d resizing and duplicate it.
278
- if np.isscalar(scale_factor) and len(input_shape) > 1:
279
- scale_factor = [scale_factor, scale_factor]
280
-
281
- # We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales
282
- scale_factor = list(scale_factor)
283
- scale_factor = [1] * (len(input_shape) - len(scale_factor)) + scale_factor
284
-
285
- # Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size
286
- # to all the unspecified dimensions
287
- if output_shape is not None:
288
- output_shape = list(input_shape[len(output_shape) :]) + list(np.uint(np.array(output_shape)))
289
-
290
- # Dealing with the case of non-give scale-factor, calculating according to output-shape. note that this is
291
- # sub-optimal, because there can be different scales to the same output-shape.
292
- if scale_factor is None:
293
- scale_factor = 1.0 * np.array(output_shape) / np.array(input_shape)
294
-
295
- # Dealing with missing output-shape. calculating according to scale-factor
296
- if output_shape is None:
297
- output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor)))
298
-
299
- return scale_factor, output_shape
300
-
301
- def contributions(self, in_length, out_length, scale, kernel, kernel_width, antialiasing):
302
- # This function calculates a set of 'filters' and a set of field_of_view that will later on be applied
303
- # such that each position from the field_of_view will be multiplied with a matching filter from the
304
- # 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers
305
- # around it. This is only done for one dimension of the image.
306
-
307
- # When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of
308
- # 1/sf. this means filtering is more 'low-pass filter'.
309
- fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing else kernel
310
- kernel_width *= 1.0 / scale if antialiasing else 1.0
311
-
312
- # These are the coordinates of the output image
313
- out_coordinates = np.arange(1, out_length + 1)
314
-
315
- # since both scale-factor and output size can be provided simultaneously, preserving the center of the image requires shifting
316
- # the output coordinates. the deviation is because out_length doesn't necessarily equal in_length*scale.
317
- # to keep the center we need to subtract half of this deviation so that we get equal margins for both sides and center is preserved.
318
- shifted_out_coordinates = out_coordinates - (out_length - in_length * scale) / 2
319
-
320
- # These are the matching positions of the output-coordinates on the input image coordinates.
321
- # Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels:
322
- # [1,2,3,4] -> [1,2]. Remember each pixel number is the middle of the pixel.
323
- # The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to
324
- # the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big
325
- # one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor).
326
- # So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is
327
- # at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means:
328
- # (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf)
329
- match_coordinates = shifted_out_coordinates / scale + 0.5 * (1 - 1 / scale)
330
-
331
- # This is the left boundary to start multiplying the filter from, it depends on the size of the filter
332
- left_boundary = np.floor(match_coordinates - kernel_width / 2)
333
-
334
- # Kernel width needs to be enlarged because when covering has sub-pixel borders, it must 'see' the pixel centers
335
- # of the pixels it only covered a part from. So we add one pixel at each side to consider (weights can zeroize them)
336
- expanded_kernel_width = np.ceil(kernel_width) + 2
337
-
338
- # Determine a set of field_of_view for each each output position, these are the pixels in the input image
339
- # that the pixel in the output image 'sees'. We get a matrix whos horizontal dim is the output pixels (big) and the
340
- # vertical dim is the pixels it 'sees' (kernel_size + 2)
341
- field_of_view = np.squeeze(
342
- np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1)
343
- )
344
-
345
- # Assign weight to each pixel in the field of view. A matrix whose horizontal dim is the output pixels and the
346
- # vertical dim is a list of weights matching to the pixel in the field of view (that are specified in
347
- # 'field_of_view')
348
- weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)
349
-
350
- # Normalize weights to sum to 1. Be careful not to divide by 0
351
- sum_weights = np.sum(weights, axis=1)
352
- sum_weights[sum_weights == 0] = 1.0
353
- weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1)
354
-
355
- # We use this mirror structure as a trick for reflection padding at the boundaries
356
- mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))))
357
- field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])]
358
-
359
- # Get rid of weights and pixel positions that are of zero weight
360
- non_zero_out_pixels = np.nonzero(np.any(weights, axis=0))
361
- weights = np.squeeze(weights[:, non_zero_out_pixels])
362
- field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels])
363
-
364
- # Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size
365
- return weights, field_of_view
366
-
367
- self.down_sample = Resizer(in_shape, 1 / scale_factor)
368
- for param in self.parameters():
369
- param.requires_grad = False
370
-
371
- def forward(self, data, **kwargs):
372
- return self.down_sample(data)
373
-
374
- # Gaussian blurring operator
375
- class GaussialBlurOperator(nn.Module):
376
- def __init__(self, kernel_size, intensity):
377
- super().__init__()
378
-
379
- class Blurkernel(nn.Module):
380
- def __init__(self, blur_type="gaussian", kernel_size=31, std=3.0):
381
- super().__init__()
382
- self.blur_type = blur_type
383
- self.kernel_size = kernel_size
384
- self.std = std
385
- self.seq = nn.Sequential(
386
- nn.ReflectionPad2d(self.kernel_size // 2),
387
- nn.Conv2d(3, 3, self.kernel_size, stride=1, padding=0, bias=False, groups=3),
388
- )
389
- self.weights_init()
390
-
391
- def forward(self, x):
392
- return self.seq(x)
393
-
394
- def weights_init(self):
395
- if self.blur_type == "gaussian":
396
- n = np.zeros((self.kernel_size, self.kernel_size))
397
- n[self.kernel_size // 2, self.kernel_size // 2] = 1
398
- k = scipy.ndimage.gaussian_filter(n, sigma=self.std)
399
- k = torch.from_numpy(k)
400
- self.k = k
401
- for name, f in self.named_parameters():
402
- f.data.copy_(k)
403
-
404
- def update_weights(self, k):
405
- if not torch.is_tensor(k):
406
- k = torch.from_numpy(k)
407
- for name, f in self.named_parameters():
408
- f.data.copy_(k)
409
-
410
- def get_kernel(self):
411
- return self.k
412
-
413
- self.kernel_size = kernel_size
414
- self.conv = Blurkernel(blur_type="gaussian", kernel_size=kernel_size, std=intensity)
415
- self.kernel = self.conv.get_kernel()
416
- self.conv.update_weights(self.kernel.type(torch.float32))
417
-
418
- for param in self.parameters():
419
- param.requires_grad = False
420
-
421
- def forward(self, data, **kwargs):
422
- return self.conv(data)
423
-
424
- def transpose(self, data, **kwargs):
425
- return data
426
-
427
- def get_kernel(self):
428
- return self.kernel.view(1, 1, self.kernel_size, self.kernel_size)
429
-
430
- # assuming the forward process y = f(x) is polluted by Gaussian noise, use l2 norm
431
- def RMSELoss(yhat, y):
432
- return torch.sqrt(torch.sum((yhat - y) ** 2))
433
-
434
- # set up source image
435
- src = Image.open("sample.png")
436
- # read image into [1,3,H,W]
437
- src = torch.from_numpy(np.array(src, dtype=np.float32)).permute(2, 0, 1)[None]
438
- # normalize image to [-1,1]
439
- src = (src / 127.5) - 1.0
440
- src = src.to("cuda")
441
-
442
- # set up operator and measurement
443
- # operator = SuperResolutionOperator(in_shape=src.shape, scale_factor=4).to("cuda")
444
- operator = GaussialBlurOperator(kernel_size=61, intensity=3.0).to("cuda")
445
- measurement = operator(src)
446
-
447
- # set up scheduler
448
- scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256")
449
- scheduler.set_timesteps(1000)
450
-
451
- # set up model
452
- model = UNet2DModel.from_pretrained("google/ddpm-celebahq-256").to("cuda")
453
-
454
- save_image((src + 1.0) / 2.0, "dps_src.png")
455
- save_image((measurement + 1.0) / 2.0, "dps_mea.png")
456
-
457
- # finally, the pipeline
458
- dpspipe = DPSPipeline(model, scheduler)
459
- image = dpspipe(
460
- measurement=measurement,
461
- operator=operator,
462
- loss_fn=RMSELoss,
463
- zeta=1.0,
464
- ).images[0]
465
-
466
- image.save("dps_generated_image.png")
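For orientation, the measurement-guided update that the DPS example above relies on can be sketched in a few lines. This is a minimal, hedged illustration of one posterior-sampling step, not the deleted pipeline's internal API; the function name dps_guidance_step and its argument layout are assumptions made for this sketch only.

import torch

def dps_guidance_step(unet, scheduler, x_t, t, measurement, operator, loss_fn, zeta=1.0):
    # Track gradients w.r.t. the current noisy sample so the measurement error can steer it.
    x_t = x_t.detach().requires_grad_(True)
    noise_pred = unet(x_t, t).sample
    step = scheduler.step(noise_pred, t, x_t)
    x_prev, x0_hat = step.prev_sample, step.pred_original_sample
    # Measurement consistency: distance between the degraded clean-image estimate and the observation y.
    distance = loss_fn(operator(x0_hat), measurement)
    grad = torch.autograd.grad(distance, x_t)[0]
    # DPS update: nudge the ancestral sample towards measurement consistency, scaled by zeta.
    return (x_prev - zeta * grad).detach()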
edict_pipeline.py DELETED
@@ -1,264 +0,0 @@
1
- from typing import Optional
2
-
3
- import torch
4
- from PIL import Image
5
- from tqdm.auto import tqdm
6
- from transformers import CLIPTextModel, CLIPTokenizer
7
-
8
- from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, UNet2DConditionModel
9
- from diffusers.image_processor import VaeImageProcessor
10
- from diffusers.utils import (
11
- deprecate,
12
- )
13
-
14
-
15
- class EDICTPipeline(DiffusionPipeline):
16
- def __init__(
17
- self,
18
- vae: AutoencoderKL,
19
- text_encoder: CLIPTextModel,
20
- tokenizer: CLIPTokenizer,
21
- unet: UNet2DConditionModel,
22
- scheduler: DDIMScheduler,
23
- mixing_coeff: float = 0.93,
24
- leapfrog_steps: bool = True,
25
- ):
26
- self.mixing_coeff = mixing_coeff
27
- self.leapfrog_steps = leapfrog_steps
28
-
29
- super().__init__()
30
- self.register_modules(
31
- vae=vae,
32
- text_encoder=text_encoder,
33
- tokenizer=tokenizer,
34
- unet=unet,
35
- scheduler=scheduler,
36
- )
37
-
38
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
39
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
40
-
41
- def _encode_prompt(
42
- self, prompt: str, negative_prompt: Optional[str] = None, do_classifier_free_guidance: bool = False
43
- ):
44
- text_inputs = self.tokenizer(
45
- prompt,
46
- padding="max_length",
47
- max_length=self.tokenizer.model_max_length,
48
- truncation=True,
49
- return_tensors="pt",
50
- )
51
-
52
- prompt_embeds = self.text_encoder(text_inputs.input_ids.to(self.device)).last_hidden_state
53
-
54
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=self.device)
55
-
56
- if do_classifier_free_guidance:
57
- uncond_tokens = "" if negative_prompt is None else negative_prompt
58
-
59
- uncond_input = self.tokenizer(
60
- uncond_tokens,
61
- padding="max_length",
62
- max_length=self.tokenizer.model_max_length,
63
- truncation=True,
64
- return_tensors="pt",
65
- )
66
-
67
- negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device)).last_hidden_state
68
-
69
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
70
-
71
- return prompt_embeds
72
-
73
- def denoise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
74
- x = self.mixing_coeff * x + (1 - self.mixing_coeff) * y
75
- y = self.mixing_coeff * y + (1 - self.mixing_coeff) * x
76
-
77
- return [x, y]
78
-
79
- def noise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
80
- y = (y - (1 - self.mixing_coeff) * x) / self.mixing_coeff
81
- x = (x - (1 - self.mixing_coeff) * y) / self.mixing_coeff
82
-
83
- return [x, y]
84
-
85
- def _get_alpha_and_beta(self, t: torch.Tensor):
86
- # as self.alphas_cumprod is always in cpu
87
- t = int(t)
88
-
89
- alpha_prod = self.scheduler.alphas_cumprod[t] if t >= 0 else self.scheduler.final_alpha_cumprod
90
-
91
- return alpha_prod, 1 - alpha_prod
92
-
93
- def noise_step(
94
- self,
95
- base: torch.Tensor,
96
- model_input: torch.Tensor,
97
- model_output: torch.Tensor,
98
- timestep: torch.Tensor,
99
- ):
100
- prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
101
-
102
- alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
103
- alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
104
-
105
- a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
106
- b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
107
-
108
- next_model_input = (base - b_t * model_output) / a_t
109
-
110
- return model_input, next_model_input.to(base.dtype)
111
-
112
- def denoise_step(
113
- self,
114
- base: torch.Tensor,
115
- model_input: torch.Tensor,
116
- model_output: torch.Tensor,
117
- timestep: torch.Tensor,
118
- ):
119
- prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
120
-
121
- alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
122
- alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
123
-
124
- a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
125
- b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
126
- next_model_input = a_t * base + b_t * model_output
127
-
128
- return model_input, next_model_input.to(base.dtype)
129
-
130
- @torch.no_grad()
131
- def decode_latents(self, latents: torch.Tensor):
132
- latents = 1 / self.vae.config.scaling_factor * latents
133
- image = self.vae.decode(latents).sample
134
- image = (image / 2 + 0.5).clamp(0, 1)
135
- return image
136
-
137
- @torch.no_grad()
138
- def prepare_latents(
139
- self,
140
- image: Image.Image,
141
- text_embeds: torch.Tensor,
142
- timesteps: torch.Tensor,
143
- guidance_scale: float,
144
- generator: Optional[torch.Generator] = None,
145
- ):
146
- do_classifier_free_guidance = guidance_scale > 1.0
147
-
148
- image = image.to(device=self.device, dtype=text_embeds.dtype)
149
- latent = self.vae.encode(image).latent_dist.sample(generator)
150
-
151
- latent = self.vae.config.scaling_factor * latent
152
-
153
- coupled_latents = [latent.clone(), latent.clone()]
154
-
155
- for i, t in tqdm(enumerate(timesteps), total=len(timesteps)):
156
- coupled_latents = self.noise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
157
-
158
- # j - model_input index, k - base index
159
- for j in range(2):
160
- k = j ^ 1
161
-
162
- if self.leapfrog_steps:
163
- if i % 2 == 0:
164
- k, j = j, k
165
-
166
- model_input = coupled_latents[j]
167
- base = coupled_latents[k]
168
-
169
- latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
170
-
171
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeds).sample
172
-
173
- if do_classifier_free_guidance:
174
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
175
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
176
-
177
- base, model_input = self.noise_step(
178
- base=base,
179
- model_input=model_input,
180
- model_output=noise_pred,
181
- timestep=t,
182
- )
183
-
184
- coupled_latents[k] = model_input
185
-
186
- return coupled_latents
187
-
188
- @torch.no_grad()
189
- def __call__(
190
- self,
191
- base_prompt: str,
192
- target_prompt: str,
193
- image: Image.Image,
194
- guidance_scale: float = 3.0,
195
- num_inference_steps: int = 50,
196
- strength: float = 0.8,
197
- negative_prompt: Optional[str] = None,
198
- generator: Optional[torch.Generator] = None,
199
- output_type: Optional[str] = "pil",
200
- ):
201
- do_classifier_free_guidance = guidance_scale > 1.0
202
-
203
- image = self.image_processor.preprocess(image)
204
-
205
- base_embeds = self._encode_prompt(base_prompt, negative_prompt, do_classifier_free_guidance)
206
- target_embeds = self._encode_prompt(target_prompt, negative_prompt, do_classifier_free_guidance)
207
-
208
- self.scheduler.set_timesteps(num_inference_steps, self.device)
209
-
210
- t_limit = num_inference_steps - int(num_inference_steps * strength)
211
- fwd_timesteps = self.scheduler.timesteps[t_limit:]
212
- bwd_timesteps = fwd_timesteps.flip(0)
213
-
214
- coupled_latents = self.prepare_latents(image, base_embeds, bwd_timesteps, guidance_scale, generator)
215
-
216
- for i, t in tqdm(enumerate(fwd_timesteps), total=len(fwd_timesteps)):
217
- # j - model_input index, k - base index
218
- for k in range(2):
219
- j = k ^ 1
220
-
221
- if self.leapfrog_steps:
222
- if i % 2 == 1:
223
- k, j = j, k
224
-
225
- model_input = coupled_latents[j]
226
- base = coupled_latents[k]
227
-
228
- latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
229
-
230
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=target_embeds).sample
231
-
232
- if do_classifier_free_guidance:
233
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
234
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
235
-
236
- base, model_input = self.denoise_step(
237
- base=base,
238
- model_input=model_input,
239
- model_output=noise_pred,
240
- timestep=t,
241
- )
242
-
243
- coupled_latents[k] = model_input
244
-
245
- coupled_latents = self.denoise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
246
-
247
- # either one is fine
248
- final_latent = coupled_latents[0]
249
-
250
- if output_type not in ["latent", "pt", "np", "pil"]:
251
- deprecation_message = (
252
- f"the output_type {output_type} is outdated. Please make sure to set it to one of these instead: "
253
- "`pil`, `np`, `pt`, `latent`"
254
- )
255
- deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
256
- output_type = "np"
257
-
258
- if output_type == "latent":
259
- image = final_latent
260
- else:
261
- image = self.decode_latents(final_latent)
262
- image = self.image_processor.postprocess(image, output_type=output_type)
263
-
264
- return image
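To make the removed EDICTPipeline easier to read, here is a hedged usage sketch assembled from the __call__ signature above, assuming the EDICTPipeline class from this file has been imported. The checkpoint id, image path and prompts are illustrative assumptions; the components are assumed to come from a Stable Diffusion v1.x checkpoint with a DDIM scheduler.

from PIL import Image
from diffusers import AutoencoderKL, DDIMScheduler, UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTokenizer

model_id = "runwayml/stable-diffusion-v1-5"  # illustrative SD v1.x checkpoint
pipe = EDICTPipeline(
    vae=AutoencoderKL.from_pretrained(model_id, subfolder="vae"),
    text_encoder=CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder"),
    tokenizer=CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer"),
    unet=UNet2DConditionModel.from_pretrained(model_id, subfolder="unet"),
    scheduler=DDIMScheduler.from_pretrained(model_id, subfolder="scheduler", set_alpha_to_one=False),
).to("cuda")

# Invert the image under the base prompt, then denoise it towards the target prompt.
source = Image.open("dog.png").convert("RGB").resize((512, 512))  # placeholder image path
edited = pipe(
    base_prompt="a photo of a dog",
    target_prompt="a photo of a golden retriever",
    image=source,
    num_inference_steps=50,
    strength=0.8,
)[0]
edited.save("edict_edited.png")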
fresco_v2v.py DELETED
The diff for this file is too large to render. See raw diff
 
gluegen.py DELETED
@@ -1,816 +0,0 @@
1
- import inspect
2
- from typing import Any, Dict, List, Optional, Union
3
-
4
- import torch
5
- import torch.nn as nn
6
- from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor
7
-
8
- from diffusers import DiffusionPipeline
9
- from diffusers.image_processor import VaeImageProcessor
10
- from diffusers.loaders import LoraLoaderMixin
11
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
- from diffusers.models.lora import adjust_lora_scale_text_encoder
13
- from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
14
- from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
15
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
16
- from diffusers.schedulers import KarrasDiffusionSchedulers
17
- from diffusers.utils import (
18
- USE_PEFT_BACKEND,
19
- logging,
20
- scale_lora_layers,
21
- unscale_lora_layers,
22
- )
23
- from diffusers.utils.torch_utils import randn_tensor
24
-
25
-
26
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
27
-
28
-
29
- class TranslatorBase(nn.Module):
30
- def __init__(self, num_tok, dim, dim_out, mult=2):
31
- super().__init__()
32
-
33
- self.dim_in = dim
34
- self.dim_out = dim_out
35
-
36
- self.net_tok = nn.Sequential(
37
- nn.Linear(num_tok, int(num_tok * mult)),
38
- nn.LayerNorm(int(num_tok * mult)),
39
- nn.GELU(),
40
- nn.Linear(int(num_tok * mult), int(num_tok * mult)),
41
- nn.LayerNorm(int(num_tok * mult)),
42
- nn.GELU(),
43
- nn.Linear(int(num_tok * mult), num_tok),
44
- nn.LayerNorm(num_tok),
45
- )
46
-
47
- self.net_sen = nn.Sequential(
48
- nn.Linear(dim, int(dim * mult)),
49
- nn.LayerNorm(int(dim * mult)),
50
- nn.GELU(),
51
- nn.Linear(int(dim * mult), int(dim * mult)),
52
- nn.LayerNorm(int(dim * mult)),
53
- nn.GELU(),
54
- nn.Linear(int(dim * mult), dim_out),
55
- nn.LayerNorm(dim_out),
56
- )
57
-
58
- def forward(self, x):
59
- if self.dim_in == self.dim_out:
60
- indentity_0 = x
61
- x = self.net_sen(x)
62
- x += indentity_0
63
- x = x.transpose(1, 2)
64
-
65
- indentity_1 = x
66
- x = self.net_tok(x)
67
- x += indentity_1
68
- x = x.transpose(1, 2)
69
- else:
70
- x = self.net_sen(x)
71
- x = x.transpose(1, 2)
72
-
73
- x = self.net_tok(x)
74
- x = x.transpose(1, 2)
75
- return x
76
-
77
-
78
- class TranslatorBaseNoLN(nn.Module):
79
- def __init__(self, num_tok, dim, dim_out, mult=2):
80
- super().__init__()
81
-
82
- self.dim_in = dim
83
- self.dim_out = dim_out
84
-
85
- self.net_tok = nn.Sequential(
86
- nn.Linear(num_tok, int(num_tok * mult)),
87
- nn.GELU(),
88
- nn.Linear(int(num_tok * mult), int(num_tok * mult)),
89
- nn.GELU(),
90
- nn.Linear(int(num_tok * mult), num_tok),
91
- )
92
-
93
- self.net_sen = nn.Sequential(
94
- nn.Linear(dim, int(dim * mult)),
95
- nn.GELU(),
96
- nn.Linear(int(dim * mult), int(dim * mult)),
97
- nn.GELU(),
98
- nn.Linear(int(dim * mult), dim_out),
99
- )
100
-
101
- def forward(self, x):
102
- if self.dim_in == self.dim_out:
103
- indentity_0 = x
104
- x = self.net_sen(x)
105
- x += indentity_0
106
- x = x.transpose(1, 2)
107
-
108
- indentity_1 = x
109
- x = self.net_tok(x)
110
- x += indentity_1
111
- x = x.transpose(1, 2)
112
- else:
113
- x = self.net_sen(x)
114
- x = x.transpose(1, 2)
115
-
116
- x = self.net_tok(x)
117
- x = x.transpose(1, 2)
118
- return x
119
-
120
-
121
- class TranslatorNoLN(nn.Module):
122
- def __init__(self, num_tok, dim, dim_out, mult=2, depth=5):
123
- super().__init__()
124
-
125
- self.blocks = nn.ModuleList([TranslatorBase(num_tok, dim, dim, mult=2) for d in range(depth)])
126
- self.gelu = nn.GELU()
127
-
128
- self.tail = TranslatorBaseNoLN(num_tok, dim, dim_out, mult=2)
129
-
130
- def forward(self, x):
131
- for block in self.blocks:
132
- x = block(x) + x
133
- x = self.gelu(x)
134
-
135
- x = self.tail(x)
136
- return x
137
-
138
-
139
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
140
- """
141
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
142
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
143
- """
144
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
145
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
146
- # rescale the results from guidance (fixes overexposure)
147
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
148
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
149
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
150
- return noise_cfg
151
-
152
-
153
- def retrieve_timesteps(
154
- scheduler,
155
- num_inference_steps: Optional[int] = None,
156
- device: Optional[Union[str, torch.device]] = None,
157
- timesteps: Optional[List[int]] = None,
158
- **kwargs,
159
- ):
160
- """
161
- Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
162
- custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
163
-
164
- Args:
165
- scheduler (`SchedulerMixin`):
166
- The scheduler to get timesteps from.
167
- num_inference_steps (`int`):
168
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
169
- `timesteps` must be `None`.
170
- device (`str` or `torch.device`, *optional*):
171
- The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
172
- timesteps (`List[int]`, *optional*):
173
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
174
- timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
175
- must be `None`.
176
-
177
- Returns:
178
- `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
179
- second element is the number of inference steps.
180
- """
181
- if timesteps is not None:
182
- accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
183
- if not accepts_timesteps:
184
- raise ValueError(
185
- f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
186
- f" timestep schedules. Please check whether you are using the correct scheduler."
187
- )
188
- scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
189
- timesteps = scheduler.timesteps
190
- num_inference_steps = len(timesteps)
191
- else:
192
- scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
193
- timesteps = scheduler.timesteps
194
- return timesteps, num_inference_steps
195
-
196
-
197
- class GlueGenStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin, LoraLoaderMixin):
198
- def __init__(
199
- self,
200
- vae: AutoencoderKL,
201
- text_encoder: AutoModel,
202
- tokenizer: AutoTokenizer,
203
- unet: UNet2DConditionModel,
204
- scheduler: KarrasDiffusionSchedulers,
205
- safety_checker: StableDiffusionSafetyChecker,
206
- feature_extractor: CLIPImageProcessor,
207
- language_adapter: TranslatorNoLN = None,
208
- tensor_norm: torch.Tensor = None,
209
- requires_safety_checker: bool = True,
210
- ):
211
- super().__init__()
212
-
213
- self.register_modules(
214
- vae=vae,
215
- text_encoder=text_encoder,
216
- tokenizer=tokenizer,
217
- unet=unet,
218
- scheduler=scheduler,
219
- safety_checker=safety_checker,
220
- feature_extractor=feature_extractor,
221
- language_adapter=language_adapter,
222
- tensor_norm=tensor_norm,
223
- )
224
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
225
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
226
- self.register_to_config(requires_safety_checker=requires_safety_checker)
227
-
228
- def load_language_adapter(
229
- self,
230
- model_path: str,
231
- num_token: int,
232
- dim: int,
233
- dim_out: int,
234
- tensor_norm: torch.Tensor,
235
- mult: int = 2,
236
- depth: int = 5,
237
- ):
238
- device = self._execution_device
239
- self.tensor_norm = tensor_norm.to(device)
240
- self.language_adapter = TranslatorNoLN(num_tok=num_token, dim=dim, dim_out=dim_out, mult=mult, depth=depth).to(
241
- device
242
- )
243
- self.language_adapter.load_state_dict(torch.load(model_path))
244
-
245
- def _adapt_language(self, prompt_embeds: torch.Tensor):
246
- prompt_embeds = prompt_embeds / 3
247
- prompt_embeds = self.language_adapter(prompt_embeds) * (self.tensor_norm / 2)
248
- return prompt_embeds
249
-
250
- def encode_prompt(
251
- self,
252
- prompt,
253
- device,
254
- num_images_per_prompt,
255
- do_classifier_free_guidance,
256
- negative_prompt=None,
257
- prompt_embeds: Optional[torch.Tensor] = None,
258
- negative_prompt_embeds: Optional[torch.Tensor] = None,
259
- lora_scale: Optional[float] = None,
260
- clip_skip: Optional[int] = None,
261
- ):
262
- r"""
263
- Encodes the prompt into text encoder hidden states.
264
-
265
- Args:
266
- prompt (`str` or `List[str]`, *optional*):
267
- prompt to be encoded
268
- device: (`torch.device`):
269
- torch device
270
- num_images_per_prompt (`int`):
271
- number of images that should be generated per prompt
272
- do_classifier_free_guidance (`bool`):
273
- whether to use classifier free guidance or not
274
- negative_prompt (`str` or `List[str]`, *optional*):
275
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
276
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
277
- less than `1`).
278
- prompt_embeds (`torch.Tensor`, *optional*):
279
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
280
- provided, text embeddings will be generated from `prompt` input argument.
281
- negative_prompt_embeds (`torch.Tensor`, *optional*):
282
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
283
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
284
- argument.
285
- lora_scale (`float`, *optional*):
286
- A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
287
- clip_skip (`int`, *optional*):
288
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
289
- the output of the pre-final layer will be used for computing the prompt embeddings.
290
- """
291
- # set lora scale so that monkey patched LoRA
292
- # function of text encoder can correctly access it
293
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
294
- self._lora_scale = lora_scale
295
-
296
- # dynamically adjust the LoRA scale
297
- if not USE_PEFT_BACKEND:
298
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
299
- else:
300
- scale_lora_layers(self.text_encoder, lora_scale)
301
-
302
- if prompt is not None and isinstance(prompt, str):
303
- batch_size = 1
304
- elif prompt is not None and isinstance(prompt, list):
305
- batch_size = len(prompt)
306
- else:
307
- batch_size = prompt_embeds.shape[0]
308
-
309
- if prompt_embeds is None:
310
- text_inputs = self.tokenizer(
311
- prompt,
312
- padding="max_length",
313
- max_length=self.tokenizer.model_max_length,
314
- truncation=True,
315
- return_tensors="pt",
316
- )
317
- text_input_ids = text_inputs.input_ids
318
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
319
-
320
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
321
- text_input_ids, untruncated_ids
322
- ):
323
- removed_text = self.tokenizer.batch_decode(
324
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
325
- )
326
- logger.warning(
327
- "The following part of your input was truncated because CLIP can only handle sequences up to"
328
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
329
- )
330
-
331
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
332
- attention_mask = text_inputs.attention_mask.to(device)
333
- elif self.language_adapter is not None:
334
- attention_mask = text_inputs.attention_mask.to(device)
335
- else:
336
- attention_mask = None
337
-
338
- if clip_skip is None:
339
- prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
340
- prompt_embeds = prompt_embeds[0]
341
-
342
- else:
343
- prompt_embeds = self.text_encoder(
344
- text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
345
- )
346
- # Access the `hidden_states` first, that contains a tuple of
347
- # all the hidden states from the encoder layers. Then index into
348
- # the tuple to access the hidden states from the desired layer.
349
- prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
350
- # We also need to apply the final LayerNorm here to not mess with the
351
- # representations. The `last_hidden_states` that we typically use for
352
- # obtaining the final prompt representations passes through the LayerNorm
353
- # layer.
354
- prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
355
-
356
- # Run prompt language adapter
357
- if self.language_adapter is not None:
358
- prompt_embeds = self._adapt_language(prompt_embeds)
359
-
360
- if self.text_encoder is not None:
361
- prompt_embeds_dtype = self.text_encoder.dtype
362
- elif self.unet is not None:
363
- prompt_embeds_dtype = self.unet.dtype
364
- else:
365
- prompt_embeds_dtype = prompt_embeds.dtype
366
-
367
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
368
-
369
- bs_embed, seq_len, _ = prompt_embeds.shape
370
- # duplicate text embeddings for each generation per prompt, using mps friendly method
371
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
372
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
373
-
374
- # get unconditional embeddings for classifier free guidance
375
- if do_classifier_free_guidance and negative_prompt_embeds is None:
376
- uncond_tokens: List[str]
377
- if negative_prompt is None:
378
- uncond_tokens = [""] * batch_size
379
- elif prompt is not None and type(prompt) is not type(negative_prompt):
380
- raise TypeError(
381
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
382
- f" {type(prompt)}."
383
- )
384
- elif isinstance(negative_prompt, str):
385
- uncond_tokens = [negative_prompt]
386
- elif batch_size != len(negative_prompt):
387
- raise ValueError(
388
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
389
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
390
- " the batch size of `prompt`."
391
- )
392
- else:
393
- uncond_tokens = negative_prompt
394
-
395
- max_length = prompt_embeds.shape[1]
396
- uncond_input = self.tokenizer(
397
- uncond_tokens,
398
- padding="max_length",
399
- max_length=max_length,
400
- truncation=True,
401
- return_tensors="pt",
402
- )
403
-
404
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
405
- attention_mask = uncond_input.attention_mask.to(device)
406
- else:
407
- attention_mask = None
408
-
409
- negative_prompt_embeds = self.text_encoder(
410
- uncond_input.input_ids.to(device),
411
- attention_mask=attention_mask,
412
- )
413
- negative_prompt_embeds = negative_prompt_embeds[0]
414
- # Run negative prompt language adapter
415
- if self.language_adapter is not None:
416
- negative_prompt_embeds = self._adapt_language(negative_prompt_embeds)
417
-
418
- if do_classifier_free_guidance:
419
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
420
- seq_len = negative_prompt_embeds.shape[1]
421
-
422
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
423
-
424
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
425
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
426
-
427
- if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
428
- # Retrieve the original scale by scaling back the LoRA layers
429
- unscale_lora_layers(self.text_encoder, lora_scale)
430
-
431
- return prompt_embeds, negative_prompt_embeds
432
-
433
- def run_safety_checker(self, image, device, dtype):
434
- if self.safety_checker is None:
435
- has_nsfw_concept = None
436
- else:
437
- if torch.is_tensor(image):
438
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
439
- else:
440
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
441
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
442
- image, has_nsfw_concept = self.safety_checker(
443
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
444
- )
445
- return image, has_nsfw_concept
446
-
447
- def prepare_extra_step_kwargs(self, generator, eta):
448
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
449
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
450
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
451
- # and should be between [0, 1]
452
-
453
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
454
- extra_step_kwargs = {}
455
- if accepts_eta:
456
- extra_step_kwargs["eta"] = eta
457
-
458
- # check if the scheduler accepts generator
459
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
460
- if accepts_generator:
461
- extra_step_kwargs["generator"] = generator
462
- return extra_step_kwargs
463
-
464
- def check_inputs(
465
- self,
466
- prompt,
467
- height,
468
- width,
469
- negative_prompt=None,
470
- prompt_embeds=None,
471
- negative_prompt_embeds=None,
472
- ):
473
- if height % 8 != 0 or width % 8 != 0:
474
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
475
-
476
- if prompt is not None and prompt_embeds is not None:
477
- raise ValueError(
478
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
479
- " only forward one of the two."
480
- )
481
- elif prompt is None and prompt_embeds is None:
482
- raise ValueError(
483
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
484
- )
485
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
486
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
487
-
488
- if negative_prompt is not None and negative_prompt_embeds is not None:
489
- raise ValueError(
490
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
491
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
492
- )
493
-
494
- if prompt_embeds is not None and negative_prompt_embeds is not None:
495
- if prompt_embeds.shape != negative_prompt_embeds.shape:
496
- raise ValueError(
497
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
498
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
499
- f" {negative_prompt_embeds.shape}."
500
- )
501
-
502
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
503
- shape = (
504
- batch_size,
505
- num_channels_latents,
506
- int(height) // self.vae_scale_factor,
507
- int(width) // self.vae_scale_factor,
508
- )
509
- if isinstance(generator, list) and len(generator) != batch_size:
510
- raise ValueError(
511
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
512
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
513
- )
514
-
515
- if latents is None:
516
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
517
- else:
518
- latents = latents.to(device)
519
-
520
- # scale the initial noise by the standard deviation required by the scheduler
521
- latents = latents * self.scheduler.init_noise_sigma
522
- return latents
523
-
524
- # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
525
- def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
526
- """
527
- See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
528
-
529
- Args:
530
- timesteps (`torch.Tensor`):
531
- generate embedding vectors at these timesteps
532
- embedding_dim (`int`, *optional*, defaults to 512):
533
- dimension of the embeddings to generate
534
- dtype:
535
- data type of the generated embeddings
536
-
537
- Returns:
538
- `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
539
- """
540
- assert len(w.shape) == 1
541
- w = w * 1000.0
542
-
543
- half_dim = embedding_dim // 2
544
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
545
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
546
- emb = w.to(dtype)[:, None] * emb[None, :]
547
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
548
- if embedding_dim % 2 == 1: # zero pad
549
- emb = torch.nn.functional.pad(emb, (0, 1))
550
- assert emb.shape == (w.shape[0], embedding_dim)
551
- return emb
552
-
553
- @property
554
- def guidance_scale(self):
555
- return self._guidance_scale
556
-
557
- @property
558
- def guidance_rescale(self):
559
- return self._guidance_rescale
560
-
561
- @property
562
- def clip_skip(self):
563
- return self._clip_skip
564
-
565
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
566
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
567
- # corresponds to doing no classifier free guidance.
568
- @property
569
- def do_classifier_free_guidance(self):
570
- return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
571
-
572
- @property
573
- def cross_attention_kwargs(self):
574
- return self._cross_attention_kwargs
575
-
576
- @property
577
- def num_timesteps(self):
578
- return self._num_timesteps
579
-
580
- @property
581
- def interrupt(self):
582
- return self._interrupt
583
-
584
- @torch.no_grad()
585
- def __call__(
586
- self,
587
- prompt: Union[str, List[str]] = None,
588
- height: Optional[int] = None,
589
- width: Optional[int] = None,
590
- num_inference_steps: int = 50,
591
- timesteps: List[int] = None,
592
- guidance_scale: float = 7.5,
593
- negative_prompt: Optional[Union[str, List[str]]] = None,
594
- num_images_per_prompt: Optional[int] = 1,
595
- eta: float = 0.0,
596
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
597
- latents: Optional[torch.Tensor] = None,
598
- prompt_embeds: Optional[torch.Tensor] = None,
599
- negative_prompt_embeds: Optional[torch.Tensor] = None,
600
- output_type: Optional[str] = "pil",
601
- return_dict: bool = True,
602
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
603
- guidance_rescale: float = 0.0,
604
- clip_skip: Optional[int] = None,
605
- **kwargs,
606
- ):
607
- r"""
608
- The call function to the pipeline for generation.
609
-
610
- Args:
611
- prompt (`str` or `List[str]`, *optional*):
612
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
613
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
614
- The height in pixels of the generated image.
615
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
616
- The width in pixels of the generated image.
617
- num_inference_steps (`int`, *optional*, defaults to 50):
618
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
619
- expense of slower inference.
620
- timesteps (`List[int]`, *optional*):
621
- Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
622
- in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
623
- passed will be used. Must be in descending order.
624
- guidance_scale (`float`, *optional*, defaults to 7.5):
625
- A higher guidance scale value encourages the model to generate images closely linked to the text
626
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
627
- negative_prompt (`str` or `List[str]`, *optional*):
628
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
629
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
630
- num_images_per_prompt (`int`, *optional*, defaults to 1):
631
- The number of images to generate per prompt.
632
- eta (`float`, *optional*, defaults to 0.0):
633
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
634
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
635
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
636
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
637
- generation deterministic.
638
- latents (`torch.Tensor`, *optional*):
639
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
640
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
641
- tensor is generated by sampling using the supplied random `generator`.
642
- prompt_embeds (`torch.Tensor`, *optional*):
643
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
644
- provided, text embeddings are generated from the `prompt` input argument.
645
- negative_prompt_embeds (`torch.Tensor`, *optional*):
646
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
647
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
648
- ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
649
- output_type (`str`, *optional*, defaults to `"pil"`):
650
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
651
- return_dict (`bool`, *optional*, defaults to `True`):
652
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
653
- plain tuple.
654
- cross_attention_kwargs (`dict`, *optional*):
655
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
656
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
657
- guidance_rescale (`float`, *optional*, defaults to 0.0):
658
- Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
659
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
660
- using zero terminal SNR.
661
- clip_skip (`int`, *optional*):
662
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
663
- the output of the pre-final layer will be used for computing the prompt embeddings.
664
-
665
- Examples:
666
-
667
- Returns:
668
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
669
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
670
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
671
- second element is a list of `bool`s indicating whether the corresponding generated image contains
672
- "not-safe-for-work" (nsfw) content.
673
- """
674
-
675
- # 0. Default height and width to unet
676
- height = height or self.unet.config.sample_size * self.vae_scale_factor
677
- width = width or self.unet.config.sample_size * self.vae_scale_factor
678
- # to deal with lora scaling and other possible forward hooks
679
-
680
- # 1. Check inputs. Raise error if not correct
681
- self.check_inputs(
682
- prompt,
683
- height,
684
- width,
685
- negative_prompt,
686
- prompt_embeds,
687
- negative_prompt_embeds,
688
- )
689
-
690
- self._guidance_scale = guidance_scale
691
- self._guidance_rescale = guidance_rescale
692
- self._clip_skip = clip_skip
693
- self._cross_attention_kwargs = cross_attention_kwargs
694
- self._interrupt = False
695
-
696
- # 2. Define call parameters
697
- if prompt is not None and isinstance(prompt, str):
698
- batch_size = 1
699
- elif prompt is not None and isinstance(prompt, list):
700
- batch_size = len(prompt)
701
- else:
702
- batch_size = prompt_embeds.shape[0]
703
-
704
- device = self._execution_device
705
-
706
- # 3. Encode input prompt
707
- lora_scale = (
708
- self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
709
- )
710
-
711
- prompt_embeds, negative_prompt_embeds = self.encode_prompt(
712
- prompt,
713
- device,
714
- num_images_per_prompt,
715
- self.do_classifier_free_guidance,
716
- negative_prompt,
717
- prompt_embeds=prompt_embeds,
718
- negative_prompt_embeds=negative_prompt_embeds,
719
- lora_scale=lora_scale,
720
- clip_skip=self.clip_skip,
721
- )
722
-
723
- # For classifier free guidance, we need to do two forward passes.
724
- # Here we concatenate the unconditional and text embeddings into a single batch
725
- # to avoid doing two forward passes
726
- if self.do_classifier_free_guidance:
727
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
728
-
729
- # 4. Prepare timesteps
730
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
731
-
732
- # 5. Prepare latent variables
733
- num_channels_latents = self.unet.config.in_channels
734
- latents = self.prepare_latents(
735
- batch_size * num_images_per_prompt,
736
- num_channels_latents,
737
- height,
738
- width,
739
- prompt_embeds.dtype,
740
- device,
741
- generator,
742
- latents,
743
- )
744
-
745
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
746
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
747
-
748
- # 6.2 Optionally get Guidance Scale Embedding
749
- timestep_cond = None
750
- if self.unet.config.time_cond_proj_dim is not None:
751
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
752
- timestep_cond = self.get_guidance_scale_embedding(
753
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
754
- ).to(device=device, dtype=latents.dtype)
755
-
756
- # 7. Denoising loop
757
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
758
- self._num_timesteps = len(timesteps)
759
- with self.progress_bar(total=num_inference_steps) as progress_bar:
760
- for i, t in enumerate(timesteps):
761
- if self.interrupt:
762
- continue
763
-
764
- # expand the latents if we are doing classifier free guidance
765
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
766
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
767
-
768
- # predict the noise residual
769
- noise_pred = self.unet(
770
- latent_model_input,
771
- t,
772
- encoder_hidden_states=prompt_embeds,
773
- timestep_cond=timestep_cond,
774
- cross_attention_kwargs=self.cross_attention_kwargs,
775
- return_dict=False,
776
- )[0]
777
-
778
- # perform guidance
779
- if self.do_classifier_free_guidance:
780
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
781
- noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
782
-
783
- if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
784
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
785
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
786
-
787
- # compute the previous noisy sample x_t -> x_t-1
788
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
789
-
790
- # call the callback, if provided
791
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
792
- progress_bar.update()
793
-
794
- if not output_type == "latent":
795
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
796
- 0
797
- ]
798
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
799
- else:
800
- image = latents
801
- has_nsfw_concept = None
802
-
803
- if has_nsfw_concept is None:
804
- do_denormalize = [True] * image.shape[0]
805
- else:
806
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
807
-
808
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
809
-
810
- # Offload all models
811
- self.maybe_free_model_hooks()
812
-
813
- if not return_dict:
814
- return (image, has_nsfw_concept)
815
-
816
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
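For context on how the removed GlueGen pipeline was meant to be wired up, the following is a hedged sketch. The encoder choice, adapter checkpoint paths, and the dimensions passed to load_language_adapter are placeholders and assumptions (here: XLM-RoBERTa-large feeding the SD v1.x CLIP text space), and the sketch assumes gluegen.py is still resolvable as a custom pipeline or available locally.

import torch
from transformers import AutoModel, AutoTokenizer
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",                        # illustrative base checkpoint
    text_encoder=AutoModel.from_pretrained("xlm-roberta-large"),
    tokenizer=AutoTokenizer.from_pretrained("xlm-roberta-large"),
    custom_pipeline="gluegen",                               # assumes the pipeline file is still resolvable
).to("cuda")

# Attach the translator network that maps the multilingual embeddings into the CLIP text space.
pipe.load_language_adapter(
    "gluegen_language_adapter.bin",                          # placeholder adapter checkpoint path
    num_token=77,
    dim=1024,                                                # assumed XLM-R-large hidden size
    dim_out=768,                                             # assumed SD v1.x CLIP hidden size
    tensor_norm=torch.load("gluegen_tensor_norm.pt"),        # placeholder norm tensor path
)

image = pipe("una foto di un gatto su un davanzale", num_inference_steps=50).images[0]
image.save("gluegen_out.png")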
hd_painter.py DELETED
@@ -1,994 +0,0 @@
1
- import math
2
- import numbers
3
- from typing import Any, Callable, Dict, List, Optional, Union
4
-
5
- import torch
6
- import torch.nn.functional as F
7
- from torch import nn
8
-
9
- from diffusers.image_processor import PipelineImageInput
10
- from diffusers.models import AsymmetricAutoencoderKL, ImageProjection
11
- from diffusers.models.attention_processor import Attention, AttnProcessor
12
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
13
- from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import (
14
- StableDiffusionInpaintPipeline,
15
- retrieve_timesteps,
16
- )
17
- from diffusers.utils import deprecate
18
-
19
-
20
- class RASGAttnProcessor:
21
- def __init__(self, mask, token_idx, scale_factor):
22
- self.attention_scores = None # Stores the last output of the similarity matrix here. Each layer will get its own RASGAttnProcessor assigned
23
- self.mask = mask
24
- self.token_idx = token_idx
25
- self.scale_factor = scale_factor
26
- self.mask_resoltuion = mask.shape[-1] * mask.shape[-2] # 64 x 64 if the image is 512x512
27
-
28
- def __call__(
29
- self,
30
- attn: Attention,
31
- hidden_states: torch.Tensor,
32
- encoder_hidden_states: Optional[torch.Tensor] = None,
33
- attention_mask: Optional[torch.Tensor] = None,
34
- temb: Optional[torch.Tensor] = None,
35
- scale: float = 1.0,
36
- ) -> torch.Tensor:
37
- # Same as the default AttnProcessor up until the part where the similarity matrix gets saved
38
- downscale_factor = self.mask_resoltuion // hidden_states.shape[1]
39
- residual = hidden_states
40
-
41
- if attn.spatial_norm is not None:
42
- hidden_states = attn.spatial_norm(hidden_states, temb)
43
-
44
- input_ndim = hidden_states.ndim
45
-
46
- if input_ndim == 4:
47
- batch_size, channel, height, width = hidden_states.shape
48
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
49
-
50
- batch_size, sequence_length, _ = (
51
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
52
- )
53
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
54
-
55
- if attn.group_norm is not None:
56
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
57
-
58
- query = attn.to_q(hidden_states)
59
-
60
- if encoder_hidden_states is None:
61
- encoder_hidden_states = hidden_states
62
- elif attn.norm_cross:
63
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
64
-
65
- key = attn.to_k(encoder_hidden_states)
66
- value = attn.to_v(encoder_hidden_states)
67
-
68
- query = attn.head_to_batch_dim(query)
69
- key = attn.head_to_batch_dim(key)
70
- value = attn.head_to_batch_dim(value)
71
-
72
- # Automatically recognize the resolution and save the attention similarity values
73
- # We need to use the values before the softmax function, hence the rewritten get_attention_scores function.
74
- if downscale_factor == self.scale_factor**2:
75
- self.attention_scores = get_attention_scores(attn, query, key, attention_mask)
76
- attention_probs = self.attention_scores.softmax(dim=-1)
77
- attention_probs = attention_probs.to(query.dtype)
78
- else:
79
- attention_probs = attn.get_attention_scores(query, key, attention_mask) # Original code
80
-
81
- hidden_states = torch.bmm(attention_probs, value)
82
- hidden_states = attn.batch_to_head_dim(hidden_states)
83
-
84
- # linear proj
85
- hidden_states = attn.to_out[0](hidden_states)
86
- # dropout
87
- hidden_states = attn.to_out[1](hidden_states)
88
-
89
- if input_ndim == 4:
90
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
91
-
92
- if attn.residual_connection:
93
- hidden_states = hidden_states + residual
94
-
95
- hidden_states = hidden_states / attn.rescale_output_factor
96
-
97
- return hidden_states
98
-
99
-
100
- class PAIntAAttnProcessor:
101
- def __init__(self, transformer_block, mask, token_idx, do_classifier_free_guidance, scale_factors):
102
- self.transformer_block = transformer_block # Stores the parent transformer block.
103
- self.mask = mask
104
- self.scale_factors = scale_factors
105
- self.do_classifier_free_guidance = do_classifier_free_guidance
106
- self.token_idx = token_idx
107
- self.shape = mask.shape[2:]
108
- self.mask_resoltuion = mask.shape[-1] * mask.shape[-2] # 64 x 64
109
- self.default_processor = AttnProcessor()
110
-
111
- def __call__(
112
- self,
113
- attn: Attention,
114
- hidden_states: torch.Tensor,
115
- encoder_hidden_states: Optional[torch.Tensor] = None,
116
- attention_mask: Optional[torch.Tensor] = None,
117
- temb: Optional[torch.Tensor] = None,
118
- scale: float = 1.0,
119
- ) -> torch.Tensor:
120
- # Automatically recognize the resolution of the current attention layer and resize the masks accordingly
121
- downscale_factor = self.mask_resoltuion // hidden_states.shape[1]
122
-
123
- mask = None
124
- for factor in self.scale_factors:
125
- if downscale_factor == factor**2:
126
- shape = (self.shape[0] // factor, self.shape[1] // factor)
127
- mask = F.interpolate(self.mask, shape, mode="bicubic") # B, 1, H, W
128
- break
129
- if mask is None:
130
- return self.default_processor(attn, hidden_states, encoder_hidden_states, attention_mask, temb, scale)
131
-
132
- # STARTS HERE
133
- residual = hidden_states
134
- # Save the input hidden_states for later use
135
- input_hidden_states = hidden_states
136
-
137
- # ================================================== #
138
- # =============== SELF ATTENTION 1 ================= #
139
- # ================================================== #
140
-
141
- if attn.spatial_norm is not None:
142
- hidden_states = attn.spatial_norm(hidden_states, temb)
143
-
144
- input_ndim = hidden_states.ndim
145
-
146
- if input_ndim == 4:
147
- batch_size, channel, height, width = hidden_states.shape
148
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
149
-
150
- batch_size, sequence_length, _ = (
151
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
152
- )
153
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
154
-
155
- if attn.group_norm is not None:
156
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
157
-
158
- query = attn.to_q(hidden_states)
159
-
160
- if encoder_hidden_states is None:
161
- encoder_hidden_states = hidden_states
162
- elif attn.norm_cross:
163
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
164
-
165
- key = attn.to_k(encoder_hidden_states)
166
- value = attn.to_v(encoder_hidden_states)
167
-
168
- query = attn.head_to_batch_dim(query)
169
- key = attn.head_to_batch_dim(key)
170
- value = attn.head_to_batch_dim(value)
171
-
172
- # self_attention_probs = attn.get_attention_scores(query, key, attention_mask) # We can't use post-softmax attention scores in this case
173
- self_attention_scores = get_attention_scores(
174
- attn, query, key, attention_mask
175
- ) # The custom function returns pre-softmax attention scores
176
- self_attention_probs = self_attention_scores.softmax(
177
- dim=-1
178
- ) # Manually compute the probabilities here, the scores will be reused in the second part of PAIntA
179
- self_attention_probs = self_attention_probs.to(query.dtype)
180
-
181
- hidden_states = torch.bmm(self_attention_probs, value)
182
- hidden_states = attn.batch_to_head_dim(hidden_states)
183
-
184
- # linear proj
185
- hidden_states = attn.to_out[0](hidden_states)
186
- # dropout
187
- hidden_states = attn.to_out[1](hidden_states)
188
-
189
- # x = x + self.attn1(self.norm1(x))
190
-
191
- if input_ndim == 4:
192
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
193
-
194
- if attn.residual_connection: # So many residuals everywhere
195
- hidden_states = hidden_states + residual
196
-
197
- self_attention_output_hidden_states = hidden_states / attn.rescale_output_factor
198
-
199
- # ================================================== #
200
- # ============ BasicTransformerBlock =============== #
201
- # ================================================== #
202
- # We use a hack: we run the code from the BasicTransformerBlock that sits between the self- and cross-attention here
203
- # The other option would've been modifying the BasicTransformerBlock and adding this functionality here.
204
- # I assumed that changing the BasicTransformerBlock would have been a bigger deal and decided to use this hack instead.
205
-
206
- # The SelfAttention block receives the normalized latents from the BasicTransformerBlock,
207
- # But the residual of the output is the non-normalized version.
208
- # Therefore we unnormalize the input hidden state here
209
- unnormalized_input_hidden_states = (
210
- input_hidden_states + self.transformer_block.norm1.bias
211
- ) * self.transformer_block.norm1.weight
212
-
213
- # TODO: return if necessary
214
- # if self.use_ada_layer_norm_zero:
215
- # attn_output = gate_msa.unsqueeze(1) * attn_output
216
- # elif self.use_ada_layer_norm_single:
217
- # attn_output = gate_msa * attn_output
218
-
219
- transformer_hidden_states = self_attention_output_hidden_states + unnormalized_input_hidden_states
220
- if transformer_hidden_states.ndim == 4:
221
- transformer_hidden_states = transformer_hidden_states.squeeze(1)
222
-
223
- # TODO: return if necessary
224
- # 2.5 GLIGEN Control
225
- # if gligen_kwargs is not None:
226
- # transformer_hidden_states = self.fuser(transformer_hidden_states, gligen_kwargs["objs"])
227
- # NOTE: we experimented with using GLIGEN and HDPainter together, the results were not that great
228
-
229
- # 3. Cross-Attention
230
- if self.transformer_block.use_ada_layer_norm:
231
- # transformer_norm_hidden_states = self.transformer_block.norm2(transformer_hidden_states, timestep)
232
- raise NotImplementedError()
233
- elif self.transformer_block.use_ada_layer_norm_zero or self.transformer_block.use_layer_norm:
234
- transformer_norm_hidden_states = self.transformer_block.norm2(transformer_hidden_states)
235
- elif self.transformer_block.use_ada_layer_norm_single:
236
- # For PixArt norm2 isn't applied here:
237
- # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
238
- transformer_norm_hidden_states = transformer_hidden_states
239
- elif self.transformer_block.use_ada_layer_norm_continuous:
240
- # transformer_norm_hidden_states = self.transformer_block.norm2(transformer_hidden_states, added_cond_kwargs["pooled_text_emb"])
241
- raise NotImplementedError()
242
- else:
243
- raise ValueError("Incorrect norm")
244
-
245
- if self.transformer_block.pos_embed is not None and self.transformer_block.use_ada_layer_norm_single is False:
246
- transformer_norm_hidden_states = self.transformer_block.pos_embed(transformer_norm_hidden_states)
247
-
248
- # ================================================== #
249
- # ================= CROSS ATTENTION ================ #
250
- # ================================================== #
251
-
252
- # We do an initial pass of the CrossAttention up to obtaining the similarity matrix here.
253
- # The similarity matrix is used to obtain scaling coefficients for the attention matrix of the self attention
254
- # We reuse the previously computed self-attention matrix, and only repeat the steps after the softmax
255
-
256
- cross_attention_input_hidden_states = (
257
- transformer_norm_hidden_states # Renaming the variable for the sake of readability
258
- )
259
-
260
- # TODO: check if classifier_free_guidance is being used before splitting here
261
- if self.do_classifier_free_guidance:
262
- # Our scaling coefficients depend only on the conditional part, so we split the inputs
263
- (
264
- _cross_attention_input_hidden_states_unconditional,
265
- cross_attention_input_hidden_states_conditional,
266
- ) = cross_attention_input_hidden_states.chunk(2)
267
-
268
- # Same split for the encoder_hidden_states i.e. the tokens
269
- # Since the SelfAttention processors don't get the encoder states as input, we inject them into the processor in the beginning.
270
- _encoder_hidden_states_unconditional, encoder_hidden_states_conditional = self.encoder_hidden_states.chunk(
271
- 2
272
- )
273
- else:
274
- cross_attention_input_hidden_states_conditional = cross_attention_input_hidden_states
275
- encoder_hidden_states_conditional = self.encoder_hidden_states
276
-
277
- # Rename the variables for the sake of readability
278
- # The part below is the beginning of the __call__ function of the following CrossAttention layer
279
- cross_attention_hidden_states = cross_attention_input_hidden_states_conditional
280
- cross_attention_encoder_hidden_states = encoder_hidden_states_conditional
281
-
282
- attn2 = self.transformer_block.attn2
283
-
284
- if attn2.spatial_norm is not None:
285
- cross_attention_hidden_states = attn2.spatial_norm(cross_attention_hidden_states, temb)
286
-
287
- input_ndim = cross_attention_hidden_states.ndim
288
-
289
- if input_ndim == 4:
290
- batch_size, channel, height, width = cross_attention_hidden_states.shape
291
- cross_attention_hidden_states = cross_attention_hidden_states.view(
292
- batch_size, channel, height * width
293
- ).transpose(1, 2)
294
-
295
- (
296
- batch_size,
297
- sequence_length,
298
- _,
299
- ) = cross_attention_hidden_states.shape # It is definitely a cross attention, so no need for an if block
300
- # TODO: change the attention_mask here
301
- attention_mask = attn2.prepare_attention_mask(
302
- None, sequence_length, batch_size
303
- ) # I assume the attention mask is the same...
304
-
305
- if attn2.group_norm is not None:
306
- cross_attention_hidden_states = attn2.group_norm(cross_attention_hidden_states.transpose(1, 2)).transpose(
307
- 1, 2
308
- )
309
-
310
- query2 = attn2.to_q(cross_attention_hidden_states)
311
-
312
- if attn2.norm_cross:
313
- cross_attention_encoder_hidden_states = attn2.norm_encoder_hidden_states(
314
- cross_attention_encoder_hidden_states
315
- )
316
-
317
- key2 = attn2.to_k(cross_attention_encoder_hidden_states)
318
- query2 = attn2.head_to_batch_dim(query2)
319
- key2 = attn2.head_to_batch_dim(key2)
320
-
321
- cross_attention_probs = attn2.get_attention_scores(query2, key2, attention_mask)
322
-
323
- # CrossAttention ends here, the remaining part is not used
324
-
325
- # ================================================== #
326
- # ================ SELF ATTENTION 2 ================ #
327
- # ================================================== #
328
- # DEJA VU!
329
-
330
- mask = (mask > 0.5).to(self_attention_output_hidden_states.dtype)
331
- m = mask.to(self_attention_output_hidden_states.device)
332
- # m = rearrange(m, 'b c h w -> b (h w) c').contiguous()
333
- m = m.permute(0, 2, 3, 1).reshape((m.shape[0], -1, m.shape[1])).contiguous() # B HW 1
334
- m = torch.matmul(m, m.permute(0, 2, 1)) + (1 - m)
335
-
336
- # # Compute scaling coefficients for the similarity matrix
337
- # # Select the cross attention values for the correct tokens only!
338
- # cross_attention_probs = cross_attention_probs.mean(dim = 0)
339
- # cross_attention_probs = cross_attention_probs[:, self.token_idx].sum(dim=1)
340
-
341
- # cross_attention_probs = cross_attention_probs.reshape(shape)
342
- # gaussian_smoothing = GaussianSmoothing(channels=1, kernel_size=3, sigma=0.5, dim=2).to(self_attention_output_hidden_states.device)
343
- # cross_attention_probs = gaussian_smoothing(cross_attention_probs.unsqueeze(0))[0] # optional smoothing
344
- # cross_attention_probs = cross_attention_probs.reshape(-1)
345
- # cross_attention_probs = ((cross_attention_probs - torch.median(cross_attention_probs.ravel())) / torch.max(cross_attention_probs.ravel())).clip(0, 1)
346
-
347
- # c = (1 - m) * cross_attention_probs.reshape(1, 1, -1) + m # PAIntA scaling coefficients
348
-
349
- # Compute scaling coefficients for the similarity matrix
350
- # Select the cross attention values for the correct tokens only!
351
-
352
- batch_size, dims, channels = cross_attention_probs.shape
353
- batch_size = batch_size // attn.heads
354
- cross_attention_probs = cross_attention_probs.reshape((batch_size, attn.heads, dims, channels)) # B, D, HW, T
355
-
356
- cross_attention_probs = cross_attention_probs.mean(dim=1) # B, HW, T
357
- cross_attention_probs = cross_attention_probs[..., self.token_idx].sum(dim=-1) # B, HW
358
- cross_attention_probs = cross_attention_probs.reshape((batch_size,) + shape) # , B, H, W
359
-
360
- gaussian_smoothing = GaussianSmoothing(channels=1, kernel_size=3, sigma=0.5, dim=2).to(
361
- self_attention_output_hidden_states.device
362
- )
363
- cross_attention_probs = gaussian_smoothing(cross_attention_probs[:, None])[:, 0] # optional smoothing B, H, W
364
-
365
- # Median normalization
366
- cross_attention_probs = cross_attention_probs.reshape(batch_size, -1) # B, HW
367
- cross_attention_probs = (
368
- cross_attention_probs - cross_attention_probs.median(dim=-1, keepdim=True).values
369
- ) / cross_attention_probs.max(dim=-1, keepdim=True).values
370
- cross_attention_probs = cross_attention_probs.clip(0, 1)
371
-
372
- c = (1 - m) * cross_attention_probs.reshape(batch_size, 1, -1) + m
373
- c = c.repeat_interleave(attn.heads, 0) # BD, HW
374
- if self.do_classifier_free_guidance:
375
- c = torch.cat([c, c]) # 2BD, HW
376
-
377
- # Rescaling the original self-attention matrix
378
- self_attention_scores_rescaled = self_attention_scores * c
379
- self_attention_probs_rescaled = self_attention_scores_rescaled.softmax(dim=-1)
380
-
381
- # Continuing the self attention normally using the new matrix
382
- hidden_states = torch.bmm(self_attention_probs_rescaled, value)
383
- hidden_states = attn.batch_to_head_dim(hidden_states)
384
-
385
- # linear proj
386
- hidden_states = attn.to_out[0](hidden_states)
387
- # dropout
388
- hidden_states = attn.to_out[1](hidden_states)
389
-
390
- if input_ndim == 4:
391
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
392
-
393
- if attn.residual_connection:
394
- hidden_states = hidden_states + input_hidden_states
395
-
396
- hidden_states = hidden_states / attn.rescale_output_factor
397
-
398
- return hidden_states
399
-
400
-
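The processor above keeps the pre-softmax self-attention scores and rescales them with a per-pixel coefficient map derived from the cross-attention of the prompt tokens, roughly so that generation inside the mask pays more attention to prompt-relevant regions. Below is a minimal sketch of just that rescaling step; the function name, tensor names, and shapes are illustrative assumptions, and the classifier-free-guidance batching and Gaussian smoothing done by the real processor are omitted.

```python
import torch


def painta_rescale(self_attn_scores, cross_attn_probs, mask, token_idx, heads):
    # self_attn_scores: (B*heads, HW, HW) pre-softmax self-attention scores
    # cross_attn_probs: (B*heads, HW, T) post-softmax cross-attention probabilities
    # mask: (B, 1, H, W) binary inpainting mask (float), token_idx: indices of prompt tokens
    b, hw = self_attn_scores.shape[0] // heads, self_attn_scores.shape[1]

    m = mask.flatten(2).transpose(1, 2)                 # (B, HW, 1)
    m = torch.bmm(m, m.transpose(1, 2)) + (1 - m)       # pixel pairs outside the mask keep weight 1

    rel = cross_attn_probs.reshape(b, heads, hw, -1).mean(1)   # average over heads
    rel = rel[..., token_idx].sum(-1)                          # (B, HW) prompt relevance per pixel
    rel = (rel - rel.median(-1, keepdim=True).values) / rel.max(-1, keepdim=True).values

    c = (1 - m) * rel.clamp(0, 1).unsqueeze(1) + m      # PAIntA scaling coefficients
    c = c.repeat_interleave(heads, 0)                   # (B*heads, HW, HW)
    return (self_attn_scores * c).softmax(-1)           # rescaled attention probabilities
```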
401
- class StableDiffusionHDPainterPipeline(StableDiffusionInpaintPipeline):
402
- def get_tokenized_prompt(self, prompt):
403
- out = self.tokenizer(prompt)
404
- return [self.tokenizer.decode(x) for x in out["input_ids"]]
405
-
406
- def init_attn_processors(
407
- self,
408
- mask,
409
- token_idx,
410
- use_painta=True,
411
- use_rasg=True,
412
- painta_scale_factors=[2, 4], # 64x64 -> [16x16, 32x32]
413
- rasg_scale_factor=4, # 64x64 -> 16x16
414
- self_attention_layer_name="attn1",
415
- cross_attention_layer_name="attn2",
416
- list_of_painta_layer_names=None,
417
- list_of_rasg_layer_names=None,
418
- ):
419
- default_processor = AttnProcessor()
420
- width, height = mask.shape[-2:]
421
- width, height = width // self.vae_scale_factor, height // self.vae_scale_factor
422
-
423
- painta_scale_factors = [x * self.vae_scale_factor for x in painta_scale_factors]
424
- rasg_scale_factor = self.vae_scale_factor * rasg_scale_factor
425
-
426
- attn_processors = {}
427
- for x in self.unet.attn_processors:
428
- if (list_of_painta_layer_names is None and self_attention_layer_name in x) or (
429
- list_of_painta_layer_names is not None and x in list_of_painta_layer_names
430
- ):
431
- if use_painta:
432
- transformer_block = self.unet.get_submodule(x.replace(".attn1.processor", ""))
433
- attn_processors[x] = PAIntAAttnProcessor(
434
- transformer_block, mask, token_idx, self.do_classifier_free_guidance, painta_scale_factors
435
- )
436
- else:
437
- attn_processors[x] = default_processor
438
- elif (list_of_rasg_layer_names is None and cross_attention_layer_name in x) or (
439
- list_of_rasg_layer_names is not None and x in list_of_rasg_layer_names
440
- ):
441
- if use_rasg:
442
- attn_processors[x] = RASGAttnProcessor(mask, token_idx, rasg_scale_factor)
443
- else:
444
- attn_processors[x] = default_processor
445
-
446
- self.unet.set_attn_processor(attn_processors)
447
- # import json
448
- # with open('/home/hayk.manukyan/repos/diffusers/debug.txt', 'a') as f:
449
- # json.dump({x:str(y) for x,y in self.unet.attn_processors.items()}, f, indent=4)
450
-
451
- @torch.no_grad()
452
- def __call__(
453
- self,
454
- prompt: Union[str, List[str]] = None,
455
- image: PipelineImageInput = None,
456
- mask_image: PipelineImageInput = None,
457
- masked_image_latents: torch.Tensor = None,
458
- height: Optional[int] = None,
459
- width: Optional[int] = None,
460
- padding_mask_crop: Optional[int] = None,
461
- strength: float = 1.0,
462
- num_inference_steps: int = 50,
463
- timesteps: List[int] = None,
464
- guidance_scale: float = 7.5,
465
- positive_prompt: Optional[str] = "",
466
- negative_prompt: Optional[Union[str, List[str]]] = None,
467
- num_images_per_prompt: Optional[int] = 1,
468
- eta: float = 0.01,
469
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
470
- latents: Optional[torch.Tensor] = None,
471
- prompt_embeds: Optional[torch.Tensor] = None,
472
- negative_prompt_embeds: Optional[torch.Tensor] = None,
473
- ip_adapter_image: Optional[PipelineImageInput] = None,
474
- output_type: Optional[str] = "pil",
475
- return_dict: bool = True,
476
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
477
- clip_skip: int = None,
478
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
479
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
480
- use_painta=True,
481
- use_rasg=True,
482
- self_attention_layer_name=".attn1",
483
- cross_attention_layer_name=".attn2",
484
- painta_scale_factors=[2, 4], # 16 x 16 and 32 x 32
485
- rasg_scale_factor=4, # 16x16 by default
486
- list_of_painta_layer_names=None,
487
- list_of_rasg_layer_names=None,
488
- **kwargs,
489
- ):
490
- callback = kwargs.pop("callback", None)
491
- callback_steps = kwargs.pop("callback_steps", None)
492
-
493
- if callback is not None:
494
- deprecate(
495
- "callback",
496
- "1.0.0",
497
- "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
498
- )
499
- if callback_steps is not None:
500
- deprecate(
501
- "callback_steps",
502
- "1.0.0",
503
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
504
- )
505
-
506
- # 0. Default height and width to unet
507
- height = height or self.unet.config.sample_size * self.vae_scale_factor
508
- width = width or self.unet.config.sample_size * self.vae_scale_factor
509
-
510
- #
511
- prompt_no_positives = prompt
512
- if isinstance(prompt, list):
513
- prompt = [x + positive_prompt for x in prompt]
514
- else:
515
- prompt = prompt + positive_prompt
516
-
517
- # 1. Check inputs
518
- self.check_inputs(
519
- prompt,
520
- image,
521
- mask_image,
522
- height,
523
- width,
524
- strength,
525
- callback_steps,
526
- negative_prompt,
527
- prompt_embeds,
528
- negative_prompt_embeds,
529
- callback_on_step_end_tensor_inputs,
530
- padding_mask_crop,
531
- )
532
-
533
- self._guidance_scale = guidance_scale
534
- self._clip_skip = clip_skip
535
- self._cross_attention_kwargs = cross_attention_kwargs
536
- self._interrupt = False
537
-
538
- # 2. Define call parameters
539
- if prompt is not None and isinstance(prompt, str):
540
- batch_size = 1
541
- elif prompt is not None and isinstance(prompt, list):
542
- batch_size = len(prompt)
543
- else:
544
- batch_size = prompt_embeds.shape[0]
545
-
546
- # assert batch_size == 1, "Does not work with batch size > 1 currently"
547
-
548
- device = self._execution_device
549
-
550
- # 3. Encode input prompt
551
- text_encoder_lora_scale = (
552
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
553
- )
554
- prompt_embeds, negative_prompt_embeds = self.encode_prompt(
555
- prompt,
556
- device,
557
- num_images_per_prompt,
558
- self.do_classifier_free_guidance,
559
- negative_prompt,
560
- prompt_embeds=prompt_embeds,
561
- negative_prompt_embeds=negative_prompt_embeds,
562
- lora_scale=text_encoder_lora_scale,
563
- clip_skip=self.clip_skip,
564
- )
565
- # For classifier free guidance, we need to do two forward passes.
566
- # Here we concatenate the unconditional and text embeddings into a single batch
567
- # to avoid doing two forward passes
568
- if self.do_classifier_free_guidance:
569
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
570
-
571
- if ip_adapter_image is not None:
572
- output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
573
- image_embeds, negative_image_embeds = self.encode_image(
574
- ip_adapter_image, device, num_images_per_prompt, output_hidden_state
575
- )
576
- if self.do_classifier_free_guidance:
577
- image_embeds = torch.cat([negative_image_embeds, image_embeds])
578
-
579
- # 4. set timesteps
580
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
581
- timesteps, num_inference_steps = self.get_timesteps(
582
- num_inference_steps=num_inference_steps, strength=strength, device=device
583
- )
584
- # check that number of inference steps is not < 1 - as this doesn't make sense
585
- if num_inference_steps < 1:
586
- raise ValueError(
587
- f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
588
- f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
589
- )
590
- # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
591
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
592
- # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
593
- is_strength_max = strength == 1.0
594
-
595
- # 5. Preprocess mask and image
596
-
597
- if padding_mask_crop is not None:
598
- crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
599
- resize_mode = "fill"
600
- else:
601
- crops_coords = None
602
- resize_mode = "default"
603
-
604
- original_image = image
605
- init_image = self.image_processor.preprocess(
606
- image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
607
- )
608
- init_image = init_image.to(dtype=torch.float32)
609
-
610
- # 6. Prepare latent variables
611
- num_channels_latents = self.vae.config.latent_channels
612
- num_channels_unet = self.unet.config.in_channels
613
- return_image_latents = num_channels_unet == 4
614
-
615
- latents_outputs = self.prepare_latents(
616
- batch_size * num_images_per_prompt,
617
- num_channels_latents,
618
- height,
619
- width,
620
- prompt_embeds.dtype,
621
- device,
622
- generator,
623
- latents,
624
- image=init_image,
625
- timestep=latent_timestep,
626
- is_strength_max=is_strength_max,
627
- return_noise=True,
628
- return_image_latents=return_image_latents,
629
- )
630
-
631
- if return_image_latents:
632
- latents, noise, image_latents = latents_outputs
633
- else:
634
- latents, noise = latents_outputs
635
-
636
- # 7. Prepare mask latent variables
637
- mask_condition = self.mask_processor.preprocess(
638
- mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
639
- )
640
-
641
- if masked_image_latents is None:
642
- masked_image = init_image * (mask_condition < 0.5)
643
- else:
644
- masked_image = masked_image_latents
645
-
646
- mask, masked_image_latents = self.prepare_mask_latents(
647
- mask_condition,
648
- masked_image,
649
- batch_size * num_images_per_prompt,
650
- height,
651
- width,
652
- prompt_embeds.dtype,
653
- device,
654
- generator,
655
- self.do_classifier_free_guidance,
656
- )
657
-
658
- # 7.5 Setting up HD-Painter
659
-
660
- # Get the indices of the tokens to be modified by both RASG and PAIntA
661
- token_idx = list(range(1, self.get_tokenized_prompt(prompt_no_positives).index("<|endoftext|>"))) + [
662
- self.get_tokenized_prompt(prompt).index("<|endoftext|>")
663
- ]
664
-
665
- # Setting up the attention processors
666
- self.init_attn_processors(
667
- mask_condition,
668
- token_idx,
669
- use_painta,
670
- use_rasg,
671
- painta_scale_factors=painta_scale_factors,
672
- rasg_scale_factor=rasg_scale_factor,
673
- self_attention_layer_name=self_attention_layer_name,
674
- cross_attention_layer_name=cross_attention_layer_name,
675
- list_of_painta_layer_names=list_of_painta_layer_names,
676
- list_of_rasg_layer_names=list_of_rasg_layer_names,
677
- )
678
-
679
- # 8. Check that sizes of mask, masked image and latents match
680
- if num_channels_unet == 9:
681
- # default case for runwayml/stable-diffusion-inpainting
682
- num_channels_mask = mask.shape[1]
683
- num_channels_masked_image = masked_image_latents.shape[1]
684
- if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
685
- raise ValueError(
686
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
687
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
688
- f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
689
- f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
690
- " `pipeline.unet` or your `mask_image` or `image` input."
691
- )
692
- elif num_channels_unet != 4:
693
- raise ValueError(
694
- f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
695
- )
696
-
697
- # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
698
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
699
-
700
- if use_rasg:
701
- extra_step_kwargs["generator"] = None
702
-
703
- # 9.1 Add image embeds for IP-Adapter
704
- added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
705
-
706
- # 9.2 Optionally get Guidance Scale Embedding
707
- timestep_cond = None
708
- if self.unet.config.time_cond_proj_dim is not None:
709
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
710
- timestep_cond = self.get_guidance_scale_embedding(
711
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
712
- ).to(device=device, dtype=latents.dtype)
713
-
714
- # 10. Denoising loop
715
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
716
- self._num_timesteps = len(timesteps)
717
- painta_active = True
718
-
719
- with self.progress_bar(total=num_inference_steps) as progress_bar:
720
- for i, t in enumerate(timesteps):
721
- if self.interrupt:
722
- continue
723
-
724
- if t < 500 and painta_active:
725
- self.init_attn_processors(
726
- mask_condition,
727
- token_idx,
728
- False,
729
- use_rasg,
730
- painta_scale_factors=painta_scale_factors,
731
- rasg_scale_factor=rasg_scale_factor,
732
- self_attention_layer_name=self_attention_layer_name,
733
- cross_attention_layer_name=cross_attention_layer_name,
734
- list_of_painta_layer_names=list_of_painta_layer_names,
735
- list_of_rasg_layer_names=list_of_rasg_layer_names,
736
- )
737
- painta_active = False
738
-
739
- with torch.enable_grad():
740
- self.unet.zero_grad()
741
- latents = latents.detach()
742
- latents.requires_grad = True
743
-
744
- # expand the latents if we are doing classifier free guidance
745
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
746
-
747
- # concat latents, mask, masked_image_latents in the channel dimension
748
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
749
-
750
- if num_channels_unet == 9:
751
- latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
752
-
753
- self.scheduler.latents = latents
754
- self.encoder_hidden_states = prompt_embeds
755
- for attn_processor in self.unet.attn_processors.values():
756
- attn_processor.encoder_hidden_states = prompt_embeds
757
-
758
- # predict the noise residual
759
- noise_pred = self.unet(
760
- latent_model_input,
761
- t,
762
- encoder_hidden_states=prompt_embeds,
763
- timestep_cond=timestep_cond,
764
- cross_attention_kwargs=self.cross_attention_kwargs,
765
- added_cond_kwargs=added_cond_kwargs,
766
- return_dict=False,
767
- )[0]
768
-
769
- # perform guidance
770
- if self.do_classifier_free_guidance:
771
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
772
- noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
773
-
774
- if use_rasg:
775
- # Perform RASG
776
- _, _, height, width = mask_condition.shape # 512 x 512
777
- scale_factor = self.vae_scale_factor * rasg_scale_factor # 8 * 4 = 32
778
-
779
- # TODO: Fix for > 1 batch_size
780
- rasg_mask = F.interpolate(
781
- mask_condition, (height // scale_factor, width // scale_factor), mode="bicubic"
782
- )[0, 0] # mode is nearest by default, B, H, W
783
-
784
- # Aggregate the saved attention maps
785
- attn_map = []
786
- for processor in self.unet.attn_processors.values():
787
- if hasattr(processor, "attention_scores") and processor.attention_scores is not None:
788
- if self.do_classifier_free_guidance:
789
- attn_map.append(processor.attention_scores.chunk(2)[1]) # (B/2) x H, 256, 77
790
- else:
791
- attn_map.append(processor.attention_scores) # B x H, 256, 77 ?
792
-
793
- attn_map = (
794
- torch.cat(attn_map)
795
- .mean(0)
796
- .permute(1, 0)
797
- .reshape((-1, height // scale_factor, width // scale_factor))
798
- ) # 77, 16, 16
799
-
800
- # Compute the attention score
801
- attn_score = -sum(
802
- [
803
- F.binary_cross_entropy_with_logits(x - 1.0, rasg_mask.to(device))
804
- for x in attn_map[token_idx]
805
- ]
806
- )
807
-
808
- # Backward the score and compute the gradients
809
- attn_score.backward()
810
-
811
- # Normalize the gradients and compute the noise component
812
- variance_noise = latents.grad.detach()
813
- # print("VARIANCE SHAPE", variance_noise.shape)
814
- variance_noise -= torch.mean(variance_noise, [1, 2, 3], keepdim=True)
815
- variance_noise /= torch.std(variance_noise, [1, 2, 3], keepdim=True)
816
- else:
817
- variance_noise = None
818
-
819
- # compute the previous noisy sample x_t -> x_t-1
820
- latents = self.scheduler.step(
821
- noise_pred, t, latents, **extra_step_kwargs, return_dict=False, variance_noise=variance_noise
822
- )[0]
823
-
824
- if num_channels_unet == 4:
825
- init_latents_proper = image_latents
826
- if self.do_classifier_free_guidance:
827
- init_mask, _ = mask.chunk(2)
828
- else:
829
- init_mask = mask
830
-
831
- if i < len(timesteps) - 1:
832
- noise_timestep = timesteps[i + 1]
833
- init_latents_proper = self.scheduler.add_noise(
834
- init_latents_proper, noise, torch.tensor([noise_timestep])
835
- )
836
-
837
- latents = (1 - init_mask) * init_latents_proper + init_mask * latents
838
-
839
- if callback_on_step_end is not None:
840
- callback_kwargs = {}
841
- for k in callback_on_step_end_tensor_inputs:
842
- callback_kwargs[k] = locals()[k]
843
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
844
-
845
- latents = callback_outputs.pop("latents", latents)
846
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
847
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
848
- mask = callback_outputs.pop("mask", mask)
849
- masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents)
850
-
851
- # call the callback, if provided
852
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
853
- progress_bar.update()
854
- if callback is not None and i % callback_steps == 0:
855
- step_idx = i // getattr(self.scheduler, "order", 1)
856
- callback(step_idx, t, latents)
857
-
858
- if not output_type == "latent":
859
- condition_kwargs = {}
860
- if isinstance(self.vae, AsymmetricAutoencoderKL):
861
- init_image = init_image.to(device=device, dtype=masked_image_latents.dtype)
862
- init_image_condition = init_image.clone()
863
- init_image = self._encode_vae_image(init_image, generator=generator)
864
- mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype)
865
- condition_kwargs = {"image": init_image_condition, "mask": mask_condition}
866
- image = self.vae.decode(
867
- latents / self.vae.config.scaling_factor, return_dict=False, generator=generator, **condition_kwargs
868
- )[0]
869
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
870
- else:
871
- image = latents
872
- has_nsfw_concept = None
873
-
874
- if has_nsfw_concept is None:
875
- do_denormalize = [True] * image.shape[0]
876
- else:
877
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
878
-
879
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
880
-
881
- if padding_mask_crop is not None:
882
- image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image]
883
-
884
- # Offload all models
885
- self.maybe_free_model_hooks()
886
-
887
- if not return_dict:
888
- return (image, has_nsfw_concept)
889
-
890
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
891
-
892
-
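The RASG branch of the denoising loop above turns the alignment between the prompt tokens' cross-attention and the inpainting mask into a guidance direction: it backpropagates a negated BCE score to the latents, standardizes the resulting gradient, and hands it to the scheduler as `variance_noise`. The sketch below isolates that score-and-gradient step; the function name and shapes are illustrative, and it assumes `latents` requires grad and that `attn_map` was produced in the same UNet forward pass (i.e. it is part of the autograd graph rooted at `latents`), which is what the attention processors guarantee in the actual pipeline.

```python
import torch
import torch.nn.functional as F


def rasg_variance_noise(latents, attn_map, rasg_mask, token_idx):
    # latents: (B, 4, h, w), requires_grad=True, used in the UNet forward pass
    # attn_map: (T, H, W) cross-attention averaged over layers/heads for one sample
    # rasg_mask: (H, W) inpainting mask downsampled to the attention resolution
    score = -sum(
        F.binary_cross_entropy_with_logits(attn_map[i] - 1.0, rasg_mask)
        for i in token_idx
    )
    score.backward()                              # gradient flows back into latents.grad

    g = latents.grad.detach()
    g = g - g.mean(dim=[1, 2, 3], keepdim=True)   # zero mean per sample
    g = g / g.std(dim=[1, 2, 3], keepdim=True)    # unit std per sample
    return g                                      # passed as variance_noise to scheduler.step(...)
```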
893
- # ============= Utility Functions ============== #
894
-
895
-
896
- class GaussianSmoothing(nn.Module):
897
- """
898
- Apply gaussian smoothing on a
899
- 1d, 2d or 3d tensor. Filtering is performed separately for each channel
900
- in the input using a depthwise convolution.
901
- Arguments:
902
- channels (int, sequence): Number of channels of the input tensors. Output will
903
- have this number of channels as well.
904
- kernel_size (int, sequence): Size of the gaussian kernel.
905
- sigma (float, sequence): Standard deviation of the gaussian kernel.
906
- dim (int, optional): The number of dimensions of the data.
907
- Default value is 2 (spatial).
908
- """
909
-
910
- def __init__(self, channels, kernel_size, sigma, dim=2):
911
- super(GaussianSmoothing, self).__init__()
912
- if isinstance(kernel_size, numbers.Number):
913
- kernel_size = [kernel_size] * dim
914
- if isinstance(sigma, numbers.Number):
915
- sigma = [sigma] * dim
916
-
917
- # The gaussian kernel is the product of the
918
- # gaussian function of each dimension.
919
- kernel = 1
920
- meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
921
- for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
922
- mean = (size - 1) / 2
923
- kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2))
924
-
925
- # Make sure sum of values in gaussian kernel equals 1.
926
- kernel = kernel / torch.sum(kernel)
927
-
928
- # Reshape to depthwise convolutional weight
929
- kernel = kernel.view(1, 1, *kernel.size())
930
- kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
931
-
932
- self.register_buffer("weight", kernel)
933
- self.groups = channels
934
-
935
- if dim == 1:
936
- self.conv = F.conv1d
937
- elif dim == 2:
938
- self.conv = F.conv2d
939
- elif dim == 3:
940
- self.conv = F.conv3d
941
- else:
942
- raise RuntimeError("Only 1, 2 and 3 dimensions are supported. Received {}.".format(dim))
943
-
944
- def forward(self, input):
945
- """
946
- Apply gaussian filter to input.
947
- Arguments:
948
- input (torch.Tensor): Input to apply gaussian filter on.
949
- Returns:
950
- filtered (torch.Tensor): Filtered output.
951
- """
952
- return self.conv(input, weight=self.weight.to(input.dtype), groups=self.groups, padding="same")
953
-
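The `GaussianSmoothing` module above is what the PAIntA processor uses to optionally smooth the per-pixel prompt-relevance map before turning it into scaling coefficients. A tiny usage sketch (the input shape is illustrative):

```python
import torch

smoother = GaussianSmoothing(channels=1, kernel_size=3, sigma=0.5, dim=2)
relevance = torch.rand(2, 1, 16, 16)   # (B, C, H, W) map to smooth
smoothed = smoother(relevance)         # same shape, depthwise conv with "same" padding
```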
954
-
955
- def get_attention_scores(
956
- self, query: torch.Tensor, key: torch.Tensor, attention_mask: torch.Tensor = None
957
- ) -> torch.Tensor:
958
- r"""
959
- Compute the attention scores.
960
-
961
- Args:
962
- query (`torch.Tensor`): The query tensor.
963
- key (`torch.Tensor`): The key tensor.
964
- attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.
965
-
966
- Returns:
967
- `torch.Tensor`: The attention probabilities/scores.
968
- """
969
- if self.upcast_attention:
970
- query = query.float()
971
- key = key.float()
972
-
973
- if attention_mask is None:
974
- baddbmm_input = torch.empty(
975
- query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device
976
- )
977
- beta = 0
978
- else:
979
- baddbmm_input = attention_mask
980
- beta = 1
981
-
982
- attention_scores = torch.baddbmm(
983
- baddbmm_input,
984
- query,
985
- key.transpose(-1, -2),
986
- beta=beta,
987
- alpha=self.scale,
988
- )
989
- del baddbmm_input
990
-
991
- if self.upcast_softmax:
992
- attention_scores = attention_scores.float()
993
-
994
- return attention_scores
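Community pipelines like the `StableDiffusionHDPainterPipeline` above are normally loaded through `DiffusionPipeline.from_pretrained` with the `custom_pipeline` argument. The sketch below shows roughly what a call would look like; the model id, image URLs, and scheduler choice are illustrative placeholders, not values taken from this file.

```python
import torch
from diffusers import DiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

# Model id and image URLs are placeholders; "hd_painter" refers to this community pipeline.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="hd_painter",
    torch_dtype=torch.float16,
).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

init_image = load_image("https://example.com/input.png")
mask_image = load_image("https://example.com/mask.png")

result = pipe(
    prompt="a red sports car",
    image=init_image,
    mask_image=mask_image,
    use_painta=True,   # PAIntA self-attention rescaling
    use_rasg=True,     # RASG gradient guidance
).images[0]
```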
iadb.py DELETED
@@ -1,149 +0,0 @@
1
- from typing import List, Optional, Tuple, Union
2
-
3
- import torch
4
-
5
- from diffusers import DiffusionPipeline
6
- from diffusers.configuration_utils import ConfigMixin
7
- from diffusers.pipelines.pipeline_utils import ImagePipelineOutput
8
- from diffusers.schedulers.scheduling_utils import SchedulerMixin
9
-
10
-
11
- class IADBScheduler(SchedulerMixin, ConfigMixin):
12
- """
13
- IADBScheduler is a scheduler for the Iterative α-(de)Blending denoising method. It is simple and minimalist.
14
-
15
- For more details, see the original paper: https://arxiv.org/abs/2305.03486 and the blog post: https://ggx-research.github.io/publication/2023/05/10/publication-iadb.html
16
- """
17
-
18
- def step(
19
- self,
20
- model_output: torch.Tensor,
21
- timestep: int,
22
- x_alpha: torch.Tensor,
23
- ) -> torch.Tensor:
24
- """
25
- Predict the sample at the previous timestep by reversing the ODE. Core function to propagate the diffusion
26
- process from the learned model outputs (most often the predicted noise).
27
-
28
- Args:
29
- model_output (`torch.Tensor`): direct output from learned diffusion model. It is the direction from x0 to x1.
30
- timestep (`float`): current timestep in the diffusion chain.
31
- x_alpha (`torch.Tensor`): x_alpha sample for the current timestep
32
-
33
- Returns:
34
- `torch.Tensor`: the sample at the previous timestep
35
-
36
- """
37
- if self.num_inference_steps is None:
38
- raise ValueError(
39
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
40
- )
41
-
42
- alpha = timestep / self.num_inference_steps
43
- alpha_next = (timestep + 1) / self.num_inference_steps
44
-
45
- d = model_output
46
-
47
- x_alpha = x_alpha + (alpha_next - alpha) * d
48
-
49
- return x_alpha
50
-
51
- def set_timesteps(self, num_inference_steps: int):
52
- self.num_inference_steps = num_inference_steps
53
-
54
- def add_noise(
55
- self,
56
- original_samples: torch.Tensor,
57
- noise: torch.Tensor,
58
- alpha: torch.Tensor,
59
- ) -> torch.Tensor:
60
- return original_samples * alpha + noise * (1 - alpha)
61
-
62
- def __len__(self):
63
- return self.config.num_train_timesteps
64
-
65
-
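Reading the update rule off the scheduler code above (loose notation; $x_0$ denotes Gaussian noise, $x_1$ the clean sample, $D_\theta$ the network output):

$$
x_\alpha = \alpha\, x_1 + (1-\alpha)\, x_0,
\qquad
x_{\alpha_{t+1}} = x_{\alpha_t} + (\alpha_{t+1} - \alpha_t)\, D_\theta(x_{\alpha_t}, \alpha_t),
\qquad
\alpha_t = t / N,
$$

so $D_\theta$ plays the role of the blending direction (roughly $x_1 - x_0$) and $N$ is `num_inference_steps`.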
66
- class IADBPipeline(DiffusionPipeline):
67
- r"""
68
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
69
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
70
-
71
- Parameters:
72
- unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
73
- scheduler ([`SchedulerMixin`]):
74
- A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
75
- [`DDPMScheduler`], or [`DDIMScheduler`].
76
- """
77
-
78
- def __init__(self, unet, scheduler):
79
- super().__init__()
80
-
81
- self.register_modules(unet=unet, scheduler=scheduler)
82
-
83
- @torch.no_grad()
84
- def __call__(
85
- self,
86
- batch_size: int = 1,
87
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
88
- num_inference_steps: int = 50,
89
- output_type: Optional[str] = "pil",
90
- return_dict: bool = True,
91
- ) -> Union[ImagePipelineOutput, Tuple]:
92
- r"""
93
- Args:
94
- batch_size (`int`, *optional*, defaults to 1):
95
- The number of images to generate.
96
- num_inference_steps (`int`, *optional*, defaults to 50):
97
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
98
- expense of slower inference.
99
- output_type (`str`, *optional*, defaults to `"pil"`):
100
- The output format of the generated image. Choose between
101
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
102
- return_dict (`bool`, *optional*, defaults to `True`):
103
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
104
-
105
- Returns:
106
- [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
107
- True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
108
- """
109
-
110
- # Sample gaussian noise to begin loop
111
- if isinstance(self.unet.config.sample_size, int):
112
- image_shape = (
113
- batch_size,
114
- self.unet.config.in_channels,
115
- self.unet.config.sample_size,
116
- self.unet.config.sample_size,
117
- )
118
- else:
119
- image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
120
-
121
- if isinstance(generator, list) and len(generator) != batch_size:
122
- raise ValueError(
123
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
124
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
125
- )
126
-
127
- image = torch.randn(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
128
-
129
- # set step values
130
- self.scheduler.set_timesteps(num_inference_steps)
131
- x_alpha = image.clone()
132
- for t in self.progress_bar(range(num_inference_steps)):
133
- alpha = t / num_inference_steps
134
-
135
- # 1. predict noise model_output
136
- model_output = self.unet(x_alpha, torch.tensor(alpha, device=x_alpha.device)).sample
137
-
138
- # 2. step
139
- x_alpha = self.scheduler.step(model_output, t, x_alpha)
140
-
141
- image = (x_alpha * 0.5 + 0.5).clamp(0, 1)
142
- image = image.cpu().permute(0, 2, 3, 1).numpy()
143
- if output_type == "pil":
144
- image = self.numpy_to_pil(image)
145
-
146
- if not return_dict:
147
- return (image,)
148
-
149
- return ImagePipelineOutput(images=image)
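A minimal usage sketch for the pipeline above, assuming a `UNet2DModel` checkpoint that was trained with the IADB objective; the checkpoint path is a hypothetical placeholder.

```python
import torch
from diffusers import UNet2DModel

# Hypothetical checkpoint trained for iterative alpha-(de)blending.
unet = UNet2DModel.from_pretrained("path/to/iadb-unet")
scheduler = IADBScheduler()                       # defined above
pipe = IADBPipeline(unet=unet, scheduler=scheduler).to("cuda")

generator = torch.Generator(device="cuda").manual_seed(0)
images = pipe(batch_size=4, num_inference_steps=128, generator=generator).images
images[0].save("iadb_sample.png")
```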
imagic_stable_diffusion.py DELETED
@@ -1,470 +0,0 @@
1
- """
2
- modeled after the textual_inversion.py / train_dreambooth.py and the work
3
- of justinpinkney here: https://github.com/justinpinkney/stable-diffusion/blob/main/notebooks/imagic.ipynb
4
- """
5
-
6
- import inspect
7
- import warnings
8
- from typing import List, Optional, Union
9
-
10
- import numpy as np
11
- import PIL.Image
12
- import torch
13
- import torch.nn.functional as F
14
- from accelerate import Accelerator
15
-
16
- # TODO: remove and import from diffusers.utils when the new version of diffusers is released
17
- from packaging import version
18
- from tqdm.auto import tqdm
19
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
20
-
21
- from diffusers import DiffusionPipeline
22
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
23
- from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
24
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
25
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
26
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
27
- from diffusers.utils import logging
28
-
29
-
30
- if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
31
- PIL_INTERPOLATION = {
32
- "linear": PIL.Image.Resampling.BILINEAR,
33
- "bilinear": PIL.Image.Resampling.BILINEAR,
34
- "bicubic": PIL.Image.Resampling.BICUBIC,
35
- "lanczos": PIL.Image.Resampling.LANCZOS,
36
- "nearest": PIL.Image.Resampling.NEAREST,
37
- }
38
- else:
39
- PIL_INTERPOLATION = {
40
- "linear": PIL.Image.LINEAR,
41
- "bilinear": PIL.Image.BILINEAR,
42
- "bicubic": PIL.Image.BICUBIC,
43
- "lanczos": PIL.Image.LANCZOS,
44
- "nearest": PIL.Image.NEAREST,
45
- }
46
- # ------------------------------------------------------------------------------
47
-
48
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
49
-
50
-
51
- def preprocess(image):
52
- w, h = image.size
53
- w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
54
- image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
55
- image = np.array(image).astype(np.float32) / 255.0
56
- image = image[None].transpose(0, 3, 1, 2)
57
- image = torch.from_numpy(image)
58
- return 2.0 * image - 1.0
59
-
60
-
61
- class ImagicStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
62
- r"""
63
- Pipeline for imagic image editing.
64
- See paper here: https://arxiv.org/pdf/2210.09276.pdf
65
-
66
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
67
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
68
- Args:
69
- vae ([`AutoencoderKL`]):
70
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
71
- text_encoder ([`CLIPTextModel`]):
72
- Frozen text-encoder. Stable Diffusion uses the text portion of
73
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
74
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
75
- tokenizer (`CLIPTokenizer`):
76
- Tokenizer of class
77
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
78
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
79
- scheduler ([`SchedulerMixin`]):
80
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
81
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
82
- safety_checker ([`StableDiffusionSafetyChecker`]):
83
- Classification module that estimates whether generated images could be considered offsensive or harmful.
84
- Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
85
- feature_extractor ([`CLIPImageProcessor`]):
86
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
87
- """
88
-
89
- def __init__(
90
- self,
91
- vae: AutoencoderKL,
92
- text_encoder: CLIPTextModel,
93
- tokenizer: CLIPTokenizer,
94
- unet: UNet2DConditionModel,
95
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
96
- safety_checker: StableDiffusionSafetyChecker,
97
- feature_extractor: CLIPImageProcessor,
98
- ):
99
- super().__init__()
100
- self.register_modules(
101
- vae=vae,
102
- text_encoder=text_encoder,
103
- tokenizer=tokenizer,
104
- unet=unet,
105
- scheduler=scheduler,
106
- safety_checker=safety_checker,
107
- feature_extractor=feature_extractor,
108
- )
109
-
110
- def train(
111
- self,
112
- prompt: Union[str, List[str]],
113
- image: Union[torch.Tensor, PIL.Image.Image],
114
- height: Optional[int] = 512,
115
- width: Optional[int] = 512,
116
- generator: Optional[torch.Generator] = None,
117
- embedding_learning_rate: float = 0.001,
118
- diffusion_model_learning_rate: float = 2e-6,
119
- text_embedding_optimization_steps: int = 500,
120
- model_fine_tuning_optimization_steps: int = 1000,
121
- **kwargs,
122
- ):
123
- r"""
124
- Function invoked when calling the pipeline's `train` method.
125
- Args:
126
- prompt (`str` or `List[str]`):
127
- The prompt or prompts to guide the image generation.
128
- height (`int`, *optional*, defaults to 512):
129
- The height in pixels of the generated image.
130
- width (`int`, *optional*, defaults to 512):
131
- The width in pixels of the generated image.
132
- num_inference_steps (`int`, *optional*, defaults to 50):
133
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
134
- expense of slower inference.
135
- guidance_scale (`float`, *optional*, defaults to 7.5):
136
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
137
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
138
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
139
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
140
- usually at the expense of lower image quality.
141
- eta (`float`, *optional*, defaults to 0.0):
142
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
143
- [`schedulers.DDIMScheduler`], will be ignored for others.
144
- generator (`torch.Generator`, *optional*):
145
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
146
- deterministic.
147
- latents (`torch.Tensor`, *optional*):
148
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
149
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
150
- tensor will ge generated by sampling using the supplied random `generator`.
151
- output_type (`str`, *optional*, defaults to `"pil"`):
152
- The output format of the generated image. Choose between
153
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
154
- return_dict (`bool`, *optional*, defaults to `True`):
155
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
156
- plain tuple.
157
- Returns:
158
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
159
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
160
- When returning a tuple, the first element is a list with the generated images, and the second element is a
161
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
162
- (nsfw) content, according to the `safety_checker`.
163
- """
164
- accelerator = Accelerator(
165
- gradient_accumulation_steps=1,
166
- mixed_precision="fp16",
167
- )
168
-
169
- if "torch_device" in kwargs:
170
- device = kwargs.pop("torch_device")
171
- warnings.warn(
172
- "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0."
173
- " Consider using `pipe.to(torch_device)` instead."
174
- )
175
-
176
- if device is None:
177
- device = "cuda" if torch.cuda.is_available() else "cpu"
178
- self.to(device)
179
-
180
- if height % 8 != 0 or width % 8 != 0:
181
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
182
-
183
- # Freeze vae and unet
184
- self.vae.requires_grad_(False)
185
- self.unet.requires_grad_(False)
186
- self.text_encoder.requires_grad_(False)
187
- self.unet.eval()
188
- self.vae.eval()
189
- self.text_encoder.eval()
190
-
191
- if accelerator.is_main_process:
192
- accelerator.init_trackers(
193
- "imagic",
194
- config={
195
- "embedding_learning_rate": embedding_learning_rate,
196
- "text_embedding_optimization_steps": text_embedding_optimization_steps,
197
- },
198
- )
199
-
200
- # get text embeddings for prompt
201
- text_input = self.tokenizer(
202
- prompt,
203
- padding="max_length",
204
- max_length=self.tokenizer.model_max_length,
205
- truncation=True,
206
- return_tensors="pt",
207
- )
208
- text_embeddings = torch.nn.Parameter(
209
- self.text_encoder(text_input.input_ids.to(self.device))[0], requires_grad=True
210
- )
211
- text_embeddings = text_embeddings.detach()
212
- text_embeddings.requires_grad_()
213
- text_embeddings_orig = text_embeddings.clone()
214
-
215
- # Initialize the optimizer
216
- optimizer = torch.optim.Adam(
217
- [text_embeddings], # only optimize the embeddings
218
- lr=embedding_learning_rate,
219
- )
220
-
221
- if isinstance(image, PIL.Image.Image):
222
- image = preprocess(image)
223
-
224
- latents_dtype = text_embeddings.dtype
225
- image = image.to(device=self.device, dtype=latents_dtype)
226
- init_latent_image_dist = self.vae.encode(image).latent_dist
227
- image_latents = init_latent_image_dist.sample(generator=generator)
228
- image_latents = 0.18215 * image_latents
229
-
230
- progress_bar = tqdm(range(text_embedding_optimization_steps), disable=not accelerator.is_local_main_process)
231
- progress_bar.set_description("Steps")
232
-
233
- global_step = 0
234
-
235
- logger.info("First optimizing the text embedding to better reconstruct the init image")
236
- for _ in range(text_embedding_optimization_steps):
237
- with accelerator.accumulate(text_embeddings):
238
- # Sample noise that we'll add to the latents
239
- noise = torch.randn(image_latents.shape).to(image_latents.device)
240
- timesteps = torch.randint(1000, (1,), device=image_latents.device)
241
-
242
- # Add noise to the latents according to the noise magnitude at each timestep
243
- # (this is the forward diffusion process)
244
- noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)
245
-
246
- # Predict the noise residual
247
- noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
248
-
249
- loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
250
- accelerator.backward(loss)
251
-
252
- optimizer.step()
253
- optimizer.zero_grad()
254
-
255
- # Checks if the accelerator has performed an optimization step behind the scenes
256
- if accelerator.sync_gradients:
257
- progress_bar.update(1)
258
- global_step += 1
259
-
260
- logs = {"loss": loss.detach().item()} # , "lr": lr_scheduler.get_last_lr()[0]}
261
- progress_bar.set_postfix(**logs)
262
- accelerator.log(logs, step=global_step)
263
-
264
- accelerator.wait_for_everyone()
265
-
266
- text_embeddings.requires_grad_(False)
267
-
268
- # Now we fine tune the unet to better reconstruct the image
269
- self.unet.requires_grad_(True)
270
- self.unet.train()
271
- optimizer = torch.optim.Adam(
272
- self.unet.parameters(), # only optimize unet
273
- lr=diffusion_model_learning_rate,
274
- )
275
- progress_bar = tqdm(range(model_fine_tuning_optimization_steps), disable=not accelerator.is_local_main_process)
276
-
277
- logger.info("Next fine tuning the entire model to better reconstruct the init image")
278
- for _ in range(model_fine_tuning_optimization_steps):
279
- with accelerator.accumulate(self.unet.parameters()):
280
- # Sample noise that we'll add to the latents
281
- noise = torch.randn(image_latents.shape).to(image_latents.device)
282
- timesteps = torch.randint(1000, (1,), device=image_latents.device)
283
-
284
- # Add noise to the latents according to the noise magnitude at each timestep
285
- # (this is the forward diffusion process)
286
- noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)
287
-
288
- # Predict the noise residual
289
- noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
290
-
291
- loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
292
- accelerator.backward(loss)
293
-
294
- optimizer.step()
295
- optimizer.zero_grad()
296
-
297
- # Checks if the accelerator has performed an optimization step behind the scenes
298
- if accelerator.sync_gradients:
299
- progress_bar.update(1)
300
- global_step += 1
301
-
302
- logs = {"loss": loss.detach().item()} # , "lr": lr_scheduler.get_last_lr()[0]}
303
- progress_bar.set_postfix(**logs)
304
- accelerator.log(logs, step=global_step)
305
-
306
- accelerator.wait_for_everyone()
307
- self.text_embeddings_orig = text_embeddings_orig
308
- self.text_embeddings = text_embeddings
309
-
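After `train()` has stored both embeddings, generation in `__call__` below simply interpolates between the original prompt embedding and the optimized, image-matching one via the `alpha` argument:

$$
e(\alpha) = \alpha\, e_{\text{orig}} + (1-\alpha)\, e_{\text{opt}},
$$

so $\alpha = 0$ reproduces the optimized embedding (closest to the input image), $\alpha = 1$ the original prompt embedding, and the default $\alpha = 1.2$ extrapolates slightly beyond the prompt to strengthen the edit.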
310
- @torch.no_grad()
311
- def __call__(
312
- self,
313
- alpha: float = 1.2,
314
- height: Optional[int] = 512,
315
- width: Optional[int] = 512,
316
- num_inference_steps: Optional[int] = 50,
317
- generator: Optional[torch.Generator] = None,
318
- output_type: Optional[str] = "pil",
319
- return_dict: bool = True,
320
- guidance_scale: float = 7.5,
321
- eta: float = 0.0,
322
- ):
323
- r"""
324
- Function invoked when calling the pipeline for generation.
325
- Args:
326
- alpha (`float`, *optional*, defaults to 1.2):
327
- The interpolation factor between the original and optimized text embeddings. A value closer to 0
328
- will resemble the original input image.
329
- height (`int`, *optional*, defaults to 512):
330
- The height in pixels of the generated image.
331
- width (`int`, *optional*, defaults to 512):
332
- The width in pixels of the generated image.
333
- num_inference_steps (`int`, *optional*, defaults to 50):
334
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
335
- expense of slower inference.
336
- guidance_scale (`float`, *optional*, defaults to 7.5):
337
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
338
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
339
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
340
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
341
- usually at the expense of lower image quality.
342
- generator (`torch.Generator`, *optional*):
343
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
344
- deterministic.
345
- output_type (`str`, *optional*, defaults to `"pil"`):
346
- The output format of the generated image. Choose between
347
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
348
- return_dict (`bool`, *optional*, defaults to `True`):
349
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
350
- plain tuple.
351
- eta (`float`, *optional*, defaults to 0.0):
352
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
353
- [`schedulers.DDIMScheduler`], will be ignored for others.
354
- Returns:
355
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
356
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
357
- When returning a tuple, the first element is a list with the generated images, and the second element is a
358
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
359
- (nsfw) content, according to the `safety_checker`.
360
- """
361
- if height % 8 != 0 or width % 8 != 0:
362
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
363
- if self.text_embeddings is None:
364
- raise ValueError("Please run the pipe.train() before trying to generate an image.")
365
- if self.text_embeddings_orig is None:
366
- raise ValueError("Please run the pipe.train() before trying to generate an image.")
367
-
368
- text_embeddings = alpha * self.text_embeddings_orig + (1 - alpha) * self.text_embeddings
369
-
370
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
371
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
372
- # corresponds to doing no classifier free guidance.
373
- do_classifier_free_guidance = guidance_scale > 1.0
374
- # get unconditional embeddings for classifier free guidance
375
- if do_classifier_free_guidance:
376
- uncond_tokens = [""]
377
- max_length = self.tokenizer.model_max_length
378
- uncond_input = self.tokenizer(
379
- uncond_tokens,
380
- padding="max_length",
381
- max_length=max_length,
382
- truncation=True,
383
- return_tensors="pt",
384
- )
385
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
386
-
387
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
388
- seq_len = uncond_embeddings.shape[1]
389
- uncond_embeddings = uncond_embeddings.view(1, seq_len, -1)
390
-
391
- # For classifier free guidance, we need to do two forward passes.
392
- # Here we concatenate the unconditional and text embeddings into a single batch
393
- # to avoid doing two forward passes
394
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
395
-
396
- # get the initial random noise unless the user supplied it
397
-
398
- # Unlike in other pipelines, latents need to be generated in the target device
399
- # for 1-to-1 results reproducibility with the CompVis implementation.
400
- # However this currently doesn't work in `mps`.
401
- latents_shape = (1, self.unet.config.in_channels, height // 8, width // 8)
402
- latents_dtype = text_embeddings.dtype
403
- if self.device.type == "mps":
404
- # randn does not exist on mps
405
- latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
406
- self.device
407
- )
408
- else:
409
- latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
410
-
411
- # set timesteps
412
- self.scheduler.set_timesteps(num_inference_steps)
413
-
414
- # Some schedulers like PNDM have timesteps as arrays
415
- # It's more optimized to move all timesteps to correct device beforehand
416
- timesteps_tensor = self.scheduler.timesteps.to(self.device)
417
-
418
- # scale the initial noise by the standard deviation required by the scheduler
419
- latents = latents * self.scheduler.init_noise_sigma
420
-
421
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
422
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
423
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
424
- # and should be between [0, 1]
425
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
426
- extra_step_kwargs = {}
427
- if accepts_eta:
428
- extra_step_kwargs["eta"] = eta
429
-
430
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
431
- # expand the latents if we are doing classifier free guidance
432
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
433
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
434
-
435
- # predict the noise residual
436
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
437
-
438
- # perform guidance
439
- if do_classifier_free_guidance:
440
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
441
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
442
-
443
- # compute the previous noisy sample x_t -> x_t-1
444
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
445
-
446
- latents = 1 / 0.18215 * latents
447
- image = self.vae.decode(latents).sample
448
-
449
- image = (image / 2 + 0.5).clamp(0, 1)
450
-
451
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
452
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
453
-
454
- if self.safety_checker is not None:
455
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
456
- self.device
457
- )
458
- image, has_nsfw_concept = self.safety_checker(
459
- images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
460
- )
461
- else:
462
- has_nsfw_concept = None
463
-
464
- if output_type == "pil":
465
- image = self.numpy_to_pil(image)
466
-
467
- if not return_dict:
468
- return (image, has_nsfw_concept)
469
-
470
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
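
Note on the hunk above: it is the tail end of an Imagic-style editing pipeline (the `pipe.train()` requirement and the `alpha`-weighted interpolation of `text_embeddings_orig` identify it). Below is a minimal usage sketch for illustration only: the community-pipeline id `imagic_stable_diffusion`, the base checkpoint id, the input image path, and the `train(prompt, image)` signature are assumptions that do not appear in this hunk.

import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Assumes a diffusers release that still resolves this community pipeline and a CUDA device.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",              # assumed base checkpoint
    custom_pipeline="imagic_stable_diffusion",    # assumed community-pipeline id
).to("cuda")

init_image = Image.open("dog.png").convert("RGB")  # placeholder input image

# train() is defined earlier in the file, outside this hunk; it optimizes the text
# embeddings toward the input image. The (prompt, image) signature below is assumed.
pipe.train("A photo of a sitting dog", init_image)

# Per the docstring above, alpha closer to 0 stays near the reconstructed input image,
# while larger values (default 1.2) apply more of the text-driven edit.
edited = pipe(alpha=1.2, guidance_scale=7.5, num_inference_steps=50).images[0]
edited.save("imagic_edit.png")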
 
img2img_inpainting.py DELETED
@@ -1,437 +0,0 @@
1
- import inspect
2
- from typing import Callable, List, Optional, Tuple, Union
3
-
4
- import numpy as np
5
- import PIL.Image
6
- import torch
7
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
8
-
9
- from diffusers import DiffusionPipeline
10
- from diffusers.configuration_utils import FrozenDict
11
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
13
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
14
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
15
- from diffusers.utils import deprecate, logging
16
-
17
-
18
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
19
-
20
-
21
- def prepare_mask_and_masked_image(image, mask):
22
- image = np.array(image.convert("RGB"))
23
- image = image[None].transpose(0, 3, 1, 2)
24
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
25
-
26
- mask = np.array(mask.convert("L"))
27
- mask = mask.astype(np.float32) / 255.0
28
- mask = mask[None, None]
29
- mask[mask < 0.5] = 0
30
- mask[mask >= 0.5] = 1
31
- mask = torch.from_numpy(mask)
32
-
33
- masked_image = image * (mask < 0.5)
34
-
35
- return mask, masked_image
36
-
37
-
38
- def check_size(image, height, width):
39
- if isinstance(image, PIL.Image.Image):
40
- w, h = image.size
41
- elif isinstance(image, torch.Tensor):
42
- *_, h, w = image.shape
43
-
44
- if h != height or w != width:
45
- raise ValueError(f"Image size should be {height}x{width}, but got {h}x{w}")
46
-
47
-
48
- def overlay_inner_image(image, inner_image, paste_offset: Tuple[int] = (0, 0)):
49
- inner_image = inner_image.convert("RGBA")
50
- image = image.convert("RGB")
51
-
52
- image.paste(inner_image, paste_offset, inner_image)
53
- image = image.convert("RGB")
54
-
55
- return image
56
-
57
-
58
- class ImageToImageInpaintingPipeline(DiffusionPipeline):
59
- r"""
60
- Pipeline for text-guided image-to-image inpainting using Stable Diffusion. *This is an experimental feature*.
61
-
62
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
63
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
64
-
65
- Args:
66
- vae ([`AutoencoderKL`]):
67
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
68
- text_encoder ([`CLIPTextModel`]):
69
- Frozen text-encoder. Stable Diffusion uses the text portion of
70
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
71
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
72
- tokenizer (`CLIPTokenizer`):
73
- Tokenizer of class
74
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
75
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
76
- scheduler ([`SchedulerMixin`]):
77
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
78
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
79
- safety_checker ([`StableDiffusionSafetyChecker`]):
80
- Classification module that estimates whether generated images could be considered offensive or harmful.
81
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
82
- feature_extractor ([`CLIPImageProcessor`]):
83
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
84
- """
85
-
86
- def __init__(
87
- self,
88
- vae: AutoencoderKL,
89
- text_encoder: CLIPTextModel,
90
- tokenizer: CLIPTokenizer,
91
- unet: UNet2DConditionModel,
92
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
93
- safety_checker: StableDiffusionSafetyChecker,
94
- feature_extractor: CLIPImageProcessor,
95
- ):
96
- super().__init__()
97
-
98
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
99
- deprecation_message = (
100
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
101
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
102
- "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
103
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
104
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
105
- " file"
106
- )
107
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
108
- new_config = dict(scheduler.config)
109
- new_config["steps_offset"] = 1
110
- scheduler._internal_dict = FrozenDict(new_config)
111
-
112
- if safety_checker is None:
113
- logger.warning(
114
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
115
- " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
116
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
117
- " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling"
118
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
119
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
120
- )
121
-
122
- self.register_modules(
123
- vae=vae,
124
- text_encoder=text_encoder,
125
- tokenizer=tokenizer,
126
- unet=unet,
127
- scheduler=scheduler,
128
- safety_checker=safety_checker,
129
- feature_extractor=feature_extractor,
130
- )
131
-
132
- @torch.no_grad()
133
- def __call__(
134
- self,
135
- prompt: Union[str, List[str]],
136
- image: Union[torch.Tensor, PIL.Image.Image],
137
- inner_image: Union[torch.Tensor, PIL.Image.Image],
138
- mask_image: Union[torch.Tensor, PIL.Image.Image],
139
- height: int = 512,
140
- width: int = 512,
141
- num_inference_steps: int = 50,
142
- guidance_scale: float = 7.5,
143
- negative_prompt: Optional[Union[str, List[str]]] = None,
144
- num_images_per_prompt: Optional[int] = 1,
145
- eta: float = 0.0,
146
- generator: Optional[torch.Generator] = None,
147
- latents: Optional[torch.Tensor] = None,
148
- output_type: Optional[str] = "pil",
149
- return_dict: bool = True,
150
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
151
- callback_steps: int = 1,
152
- **kwargs,
153
- ):
154
- r"""
155
- Function invoked when calling the pipeline for generation.
156
-
157
- Args:
158
- prompt (`str` or `List[str]`):
159
- The prompt or prompts to guide the image generation.
160
- image (`torch.Tensor` or `PIL.Image.Image`):
161
- `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
162
- be masked out with `mask_image` and repainted according to `prompt`.
163
- inner_image (`torch.Tensor` or `PIL.Image.Image`):
164
- `Image`, or tensor representing an image batch which will be overlaid onto `image`. Non-transparent
165
- regions of `inner_image` must fit inside white pixels in `mask_image`. Expects four channels, with
166
- the last channel representing the alpha channel, which will be used to blend `inner_image` with
167
- `image`. If the alpha channel is not provided, `inner_image` will be forcibly cast to RGBA.
168
- mask_image (`PIL.Image.Image`):
169
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
170
- repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
171
- to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
172
- instead of 3, so the expected shape would be `(B, H, W, 1)`.
173
- height (`int`, *optional*, defaults to 512):
174
- The height in pixels of the generated image.
175
- width (`int`, *optional*, defaults to 512):
176
- The width in pixels of the generated image.
177
- num_inference_steps (`int`, *optional*, defaults to 50):
178
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
179
- expense of slower inference.
180
- guidance_scale (`float`, *optional*, defaults to 7.5):
181
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
182
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
183
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
184
- 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
185
- usually at the expense of lower image quality.
186
- negative_prompt (`str` or `List[str]`, *optional*):
187
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
188
- if `guidance_scale` is less than `1`).
189
- num_images_per_prompt (`int`, *optional*, defaults to 1):
190
- The number of images to generate per prompt.
191
- eta (`float`, *optional*, defaults to 0.0):
192
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
193
- [`schedulers.DDIMScheduler`], will be ignored for others.
194
- generator (`torch.Generator`, *optional*):
195
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
196
- deterministic.
197
- latents (`torch.Tensor`, *optional*):
198
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
199
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
200
- tensor will be generated by sampling using the supplied random `generator`.
201
- output_type (`str`, *optional*, defaults to `"pil"`):
202
- The output format of the generated image. Choose between
203
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
204
- return_dict (`bool`, *optional*, defaults to `True`):
205
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
206
- plain tuple.
207
- callback (`Callable`, *optional*):
208
- A function that will be called every `callback_steps` steps during inference. The function will be
209
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
210
- callback_steps (`int`, *optional*, defaults to 1):
211
- The frequency at which the `callback` function will be called. If not specified, the callback will be
212
- called at every step.
213
-
214
- Returns:
215
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
216
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
217
- When returning a tuple, the first element is a list with the generated images, and the second element is a
218
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
219
- (nsfw) content, according to the `safety_checker`.
220
- """
221
-
222
- if isinstance(prompt, str):
223
- batch_size = 1
224
- elif isinstance(prompt, list):
225
- batch_size = len(prompt)
226
- else:
227
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
228
-
229
- if height % 8 != 0 or width % 8 != 0:
230
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
231
-
232
- if (callback_steps is None) or (
233
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
234
- ):
235
- raise ValueError(
236
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
237
- f" {type(callback_steps)}."
238
- )
239
-
240
- # check if input sizes are correct
241
- check_size(image, height, width)
242
- check_size(inner_image, height, width)
243
- check_size(mask_image, height, width)
244
-
245
- # get prompt text embeddings
246
- text_inputs = self.tokenizer(
247
- prompt,
248
- padding="max_length",
249
- max_length=self.tokenizer.model_max_length,
250
- return_tensors="pt",
251
- )
252
- text_input_ids = text_inputs.input_ids
253
-
254
- if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
255
- removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
256
- logger.warning(
257
- "The following part of your input was truncated because CLIP can only handle sequences up to"
258
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
259
- )
260
- text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
261
- text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
262
-
263
- # duplicate text embeddings for each generation per prompt, using mps friendly method
264
- bs_embed, seq_len, _ = text_embeddings.shape
265
- text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
266
- text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
267
-
268
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
269
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
270
- # corresponds to doing no classifier free guidance.
271
- do_classifier_free_guidance = guidance_scale > 1.0
272
- # get unconditional embeddings for classifier free guidance
273
- if do_classifier_free_guidance:
274
- uncond_tokens: List[str]
275
- if negative_prompt is None:
276
- uncond_tokens = [""]
277
- elif type(prompt) is not type(negative_prompt):
278
- raise TypeError(
279
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
280
- f" {type(prompt)}."
281
- )
282
- elif isinstance(negative_prompt, str):
283
- uncond_tokens = [negative_prompt]
284
- elif batch_size != len(negative_prompt):
285
- raise ValueError(
286
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
287
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
288
- " the batch size of `prompt`."
289
- )
290
- else:
291
- uncond_tokens = negative_prompt
292
-
293
- max_length = text_input_ids.shape[-1]
294
- uncond_input = self.tokenizer(
295
- uncond_tokens,
296
- padding="max_length",
297
- max_length=max_length,
298
- truncation=True,
299
- return_tensors="pt",
300
- )
301
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
302
-
303
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
304
- seq_len = uncond_embeddings.shape[1]
305
- uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
306
- uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
307
-
308
- # For classifier free guidance, we need to do two forward passes.
309
- # Here we concatenate the unconditional and text embeddings into a single batch
310
- # to avoid doing two forward passes
311
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
312
-
313
- # get the initial random noise unless the user supplied it
314
- # Unlike in other pipelines, latents need to be generated in the target device
315
- # for 1-to-1 results reproducibility with the CompVis implementation.
316
- # However this currently doesn't work in `mps`.
317
- num_channels_latents = self.vae.config.latent_channels
318
- latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8)
319
- latents_dtype = text_embeddings.dtype
320
- if latents is None:
321
- if self.device.type == "mps":
322
- # randn does not exist on mps
323
- latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
324
- self.device
325
- )
326
- else:
327
- latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
328
- else:
329
- if latents.shape != latents_shape:
330
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
331
- latents = latents.to(self.device)
332
-
333
- # overlay the inner image
334
- image = overlay_inner_image(image, inner_image)
335
-
336
- # prepare mask and masked_image
337
- mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
338
- mask = mask.to(device=self.device, dtype=text_embeddings.dtype)
339
- masked_image = masked_image.to(device=self.device, dtype=text_embeddings.dtype)
340
-
341
- # resize the mask to latents shape as we concatenate the mask to the latents
342
- mask = torch.nn.functional.interpolate(mask, size=(height // 8, width // 8))
343
-
344
- # encode the mask image into latents space so we can concatenate it to the latents
345
- masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
346
- masked_image_latents = 0.18215 * masked_image_latents
347
-
348
- # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
349
- mask = mask.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
350
- masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
351
-
352
- mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
353
- masked_image_latents = (
354
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
355
- )
356
-
357
- num_channels_mask = mask.shape[1]
358
- num_channels_masked_image = masked_image_latents.shape[1]
359
-
360
- if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
361
- raise ValueError(
362
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
363
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
364
- f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
365
- f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
366
- " `pipeline.unet` or your `mask_image` or `image` input."
367
- )
368
-
369
- # set timesteps
370
- self.scheduler.set_timesteps(num_inference_steps)
371
-
372
- # Some schedulers like PNDM have timesteps as arrays
373
- # It's more optimized to move all timesteps to correct device beforehand
374
- timesteps_tensor = self.scheduler.timesteps.to(self.device)
375
-
376
- # scale the initial noise by the standard deviation required by the scheduler
377
- latents = latents * self.scheduler.init_noise_sigma
378
-
379
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
380
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
381
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
382
- # and should be between [0, 1]
383
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
384
- extra_step_kwargs = {}
385
- if accepts_eta:
386
- extra_step_kwargs["eta"] = eta
387
-
388
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
389
- # expand the latents if we are doing classifier free guidance
390
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
391
-
392
- # concat latents, mask, masked_image_latents in the channel dimension
393
- latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
394
-
395
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
396
-
397
- # predict the noise residual
398
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
399
-
400
- # perform guidance
401
- if do_classifier_free_guidance:
402
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
403
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
404
-
405
- # compute the previous noisy sample x_t -> x_t-1
406
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
407
-
408
- # call the callback, if provided
409
- if callback is not None and i % callback_steps == 0:
410
- step_idx = i // getattr(self.scheduler, "order", 1)
411
- callback(step_idx, t, latents)
412
-
413
- latents = 1 / 0.18215 * latents
414
- image = self.vae.decode(latents).sample
415
-
416
- image = (image / 2 + 0.5).clamp(0, 1)
417
-
418
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
419
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
420
-
421
- if self.safety_checker is not None:
422
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
423
- self.device
424
- )
425
- image, has_nsfw_concept = self.safety_checker(
426
- images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
427
- )
428
- else:
429
- has_nsfw_concept = None
430
-
431
- if output_type == "pil":
432
- image = self.numpy_to_pil(image)
433
-
434
- if not return_dict:
435
- return (image, has_nsfw_concept)
436
-
437
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
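
A minimal usage sketch for the `ImageToImageInpaintingPipeline` deleted above. Only the `__call__` arguments are taken from the hunk; the community-pipeline id `img2img_inpainting`, the 9-channel inpainting checkpoint, and the local file names are assumptions for illustration.

import torch
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",   # assumed: an inpainting UNet with 9 input channels, as the channel check above requires
    custom_pipeline="img2img_inpainting",     # assumed community-pipeline id
    torch_dtype=torch.float16,
).to("cuda")

# All three inputs must match the target size (512x512 by default, per check_size above).
image = Image.open("background.png").convert("RGB").resize((512, 512))
inner_image = Image.open("subject.png").convert("RGBA").resize((512, 512))  # alpha channel drives the overlay blend
mask_image = Image.open("mask.png").convert("L").resize((512, 512))         # white = repaint, black = keep

result = pipe(
    prompt="a fantasy landscape, trending on artstation",
    image=image,
    inner_image=inner_image,
    mask_image=mask_image,
    num_inference_steps=50,
    guidance_scale=7.5,
).images[0]
result.save("img2img_inpainting_result.png")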
 
instaflow_one_step.py DELETED
@@ -1,685 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Any, Callable, Dict, List, Optional, Union
17
-
18
- import torch
19
- from packaging import version
20
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
21
-
22
- from diffusers.configuration_utils import FrozenDict
23
- from diffusers.image_processor import VaeImageProcessor
24
- from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
25
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
26
- from diffusers.models.lora import adjust_lora_scale_text_encoder
27
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
28
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
29
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
30
- from diffusers.schedulers import KarrasDiffusionSchedulers
31
- from diffusers.utils import (
32
- deprecate,
33
- logging,
34
- )
35
- from diffusers.utils.torch_utils import randn_tensor
36
-
37
-
38
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
-
40
-
41
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
42
- """
43
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
44
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
45
- """
46
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
47
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
48
- # rescale the results from guidance (fixes overexposure)
49
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
50
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
51
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
52
- return noise_cfg
53
-
54
-
55
- class InstaFlowPipeline(
56
- DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
57
- ):
58
- r"""
59
- Pipeline for text-to-image generation using Rectified Flow and Euler discretization.
60
- This customized pipeline is based on StableDiffusionPipeline from the official Diffusers library (0.21.4)
61
-
62
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
63
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
64
-
65
- The pipeline also inherits the following loading methods:
66
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
67
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
68
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
69
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
70
-
71
- Args:
72
- vae ([`AutoencoderKL`]):
73
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
74
- text_encoder ([`~transformers.CLIPTextModel`]):
75
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
76
- tokenizer ([`~transformers.CLIPTokenizer`]):
77
- A `CLIPTokenizer` to tokenize text.
78
- unet ([`UNet2DConditionModel`]):
79
- A `UNet2DConditionModel` to denoise the encoded image latents.
80
- scheduler ([`SchedulerMixin`]):
81
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
82
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
83
- safety_checker ([`StableDiffusionSafetyChecker`]):
84
- Classification module that estimates whether generated images could be considered offensive or harmful.
85
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
86
- about a model's potential harms.
87
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
88
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
89
- """
90
-
91
- model_cpu_offload_seq = "text_encoder->unet->vae"
92
- _optional_components = ["safety_checker", "feature_extractor"]
93
- _exclude_from_cpu_offload = ["safety_checker"]
94
-
95
- def __init__(
96
- self,
97
- vae: AutoencoderKL,
98
- text_encoder: CLIPTextModel,
99
- tokenizer: CLIPTokenizer,
100
- unet: UNet2DConditionModel,
101
- scheduler: KarrasDiffusionSchedulers,
102
- safety_checker: StableDiffusionSafetyChecker,
103
- feature_extractor: CLIPImageProcessor,
104
- requires_safety_checker: bool = True,
105
- ):
106
- super().__init__()
107
-
108
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
109
- deprecation_message = (
110
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
111
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
112
- "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
113
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
114
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
115
- " file"
116
- )
117
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
118
- new_config = dict(scheduler.config)
119
- new_config["steps_offset"] = 1
120
- scheduler._internal_dict = FrozenDict(new_config)
121
-
122
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
123
- deprecation_message = (
124
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
125
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
126
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
127
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
128
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
129
- )
130
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
131
- new_config = dict(scheduler.config)
132
- new_config["clip_sample"] = False
133
- scheduler._internal_dict = FrozenDict(new_config)
134
-
135
- if safety_checker is None and requires_safety_checker:
136
- logger.warning(
137
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
138
- " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
139
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
140
- " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling"
141
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
142
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
143
- )
144
-
145
- if safety_checker is not None and feature_extractor is None:
146
- raise ValueError(
147
- f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
148
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
149
- )
150
-
151
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
152
- version.parse(unet.config._diffusers_version).base_version
153
- ) < version.parse("0.9.0.dev0")
154
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
155
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
156
- deprecation_message = (
157
- "The configuration file of the unet has set the default `sample_size` to smaller than"
158
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
159
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
160
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
161
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
162
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
163
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
164
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
165
- " the `unet/config.json` file"
166
- )
167
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
168
- new_config = dict(unet.config)
169
- new_config["sample_size"] = 64
170
- unet._internal_dict = FrozenDict(new_config)
171
-
172
- self.register_modules(
173
- vae=vae,
174
- text_encoder=text_encoder,
175
- tokenizer=tokenizer,
176
- unet=unet,
177
- scheduler=scheduler,
178
- safety_checker=safety_checker,
179
- feature_extractor=feature_extractor,
180
- )
181
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
182
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
183
- self.register_to_config(requires_safety_checker=requires_safety_checker)
184
-
185
- def _encode_prompt(
186
- self,
187
- prompt,
188
- device,
189
- num_images_per_prompt,
190
- do_classifier_free_guidance,
191
- negative_prompt=None,
192
- prompt_embeds: Optional[torch.Tensor] = None,
193
- negative_prompt_embeds: Optional[torch.Tensor] = None,
194
- lora_scale: Optional[float] = None,
195
- ):
196
- deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
197
- deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
198
-
199
- prompt_embeds_tuple = self.encode_prompt(
200
- prompt=prompt,
201
- device=device,
202
- num_images_per_prompt=num_images_per_prompt,
203
- do_classifier_free_guidance=do_classifier_free_guidance,
204
- negative_prompt=negative_prompt,
205
- prompt_embeds=prompt_embeds,
206
- negative_prompt_embeds=negative_prompt_embeds,
207
- lora_scale=lora_scale,
208
- )
209
-
210
- # concatenate for backwards comp
211
- prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
212
-
213
- return prompt_embeds
214
-
215
- def encode_prompt(
216
- self,
217
- prompt,
218
- device,
219
- num_images_per_prompt,
220
- do_classifier_free_guidance,
221
- negative_prompt=None,
222
- prompt_embeds: Optional[torch.Tensor] = None,
223
- negative_prompt_embeds: Optional[torch.Tensor] = None,
224
- lora_scale: Optional[float] = None,
225
- ):
226
- r"""
227
- Encodes the prompt into text encoder hidden states.
228
-
229
- Args:
230
- prompt (`str` or `List[str]`, *optional*):
231
- prompt to be encoded
232
- device: (`torch.device`):
233
- torch device
234
- num_images_per_prompt (`int`):
235
- number of images that should be generated per prompt
236
- do_classifier_free_guidance (`bool`):
237
- whether to use classifier free guidance or not
238
- negative_prompt (`str` or `List[str]`, *optional*):
239
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
240
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
241
- less than `1`).
242
- prompt_embeds (`torch.Tensor`, *optional*):
243
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
244
- provided, text embeddings will be generated from `prompt` input argument.
245
- negative_prompt_embeds (`torch.Tensor`, *optional*):
246
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
247
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
248
- argument.
249
- lora_scale (`float`, *optional*):
250
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
251
- """
252
- # set lora scale so that monkey patched LoRA
253
- # function of text encoder can correctly access it
254
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
255
- self._lora_scale = lora_scale
256
-
257
- # dynamically adjust the LoRA scale
258
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
259
-
260
- if prompt is not None and isinstance(prompt, str):
261
- batch_size = 1
262
- elif prompt is not None and isinstance(prompt, list):
263
- batch_size = len(prompt)
264
- else:
265
- batch_size = prompt_embeds.shape[0]
266
-
267
- if prompt_embeds is None:
268
- # textual inversion: process multi-vector tokens if necessary
269
- if isinstance(self, TextualInversionLoaderMixin):
270
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
271
-
272
- text_inputs = self.tokenizer(
273
- prompt,
274
- padding="max_length",
275
- max_length=self.tokenizer.model_max_length,
276
- truncation=True,
277
- return_tensors="pt",
278
- )
279
- text_input_ids = text_inputs.input_ids
280
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
281
-
282
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
283
- text_input_ids, untruncated_ids
284
- ):
285
- removed_text = self.tokenizer.batch_decode(
286
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
287
- )
288
- logger.warning(
289
- "The following part of your input was truncated because CLIP can only handle sequences up to"
290
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
291
- )
292
-
293
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
294
- attention_mask = text_inputs.attention_mask.to(device)
295
- else:
296
- attention_mask = None
297
-
298
- prompt_embeds = self.text_encoder(
299
- text_input_ids.to(device),
300
- attention_mask=attention_mask,
301
- )
302
- prompt_embeds = prompt_embeds[0]
303
-
304
- if self.text_encoder is not None:
305
- prompt_embeds_dtype = self.text_encoder.dtype
306
- elif self.unet is not None:
307
- prompt_embeds_dtype = self.unet.dtype
308
- else:
309
- prompt_embeds_dtype = prompt_embeds.dtype
310
-
311
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
312
-
313
- bs_embed, seq_len, _ = prompt_embeds.shape
314
- # duplicate text embeddings for each generation per prompt, using mps friendly method
315
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
316
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
317
-
318
- # get unconditional embeddings for classifier free guidance
319
- if do_classifier_free_guidance and negative_prompt_embeds is None:
320
- uncond_tokens: List[str]
321
- if negative_prompt is None:
322
- uncond_tokens = [""] * batch_size
323
- elif prompt is not None and type(prompt) is not type(negative_prompt):
324
- raise TypeError(
325
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
326
- f" {type(prompt)}."
327
- )
328
- elif isinstance(negative_prompt, str):
329
- uncond_tokens = [negative_prompt]
330
- elif batch_size != len(negative_prompt):
331
- raise ValueError(
332
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
333
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
334
- " the batch size of `prompt`."
335
- )
336
- else:
337
- uncond_tokens = negative_prompt
338
-
339
- # textual inversion: process multi-vector tokens if necessary
340
- if isinstance(self, TextualInversionLoaderMixin):
341
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
342
-
343
- max_length = prompt_embeds.shape[1]
344
- uncond_input = self.tokenizer(
345
- uncond_tokens,
346
- padding="max_length",
347
- max_length=max_length,
348
- truncation=True,
349
- return_tensors="pt",
350
- )
351
-
352
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
353
- attention_mask = uncond_input.attention_mask.to(device)
354
- else:
355
- attention_mask = None
356
-
357
- negative_prompt_embeds = self.text_encoder(
358
- uncond_input.input_ids.to(device),
359
- attention_mask=attention_mask,
360
- )
361
- negative_prompt_embeds = negative_prompt_embeds[0]
362
-
363
- if do_classifier_free_guidance:
364
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
365
- seq_len = negative_prompt_embeds.shape[1]
366
-
367
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
368
-
369
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
370
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
371
-
372
- return prompt_embeds, negative_prompt_embeds
373
-
374
- def run_safety_checker(self, image, device, dtype):
375
- if self.safety_checker is None:
376
- has_nsfw_concept = None
377
- else:
378
- if torch.is_tensor(image):
379
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
380
- else:
381
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
382
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
383
- image, has_nsfw_concept = self.safety_checker(
384
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
385
- )
386
- return image, has_nsfw_concept
387
-
388
- def decode_latents(self, latents):
389
- deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
390
- deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
391
-
392
- latents = 1 / self.vae.config.scaling_factor * latents
393
- image = self.vae.decode(latents, return_dict=False)[0]
394
- image = (image / 2 + 0.5).clamp(0, 1)
395
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
396
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
397
- return image
398
-
399
- def merge_dW_to_unet(pipe, dW_dict, alpha=1.0):
400
- _tmp_sd = pipe.unet.state_dict()
401
- for key in dW_dict.keys():
402
- _tmp_sd[key] += dW_dict[key] * alpha
403
- pipe.unet.load_state_dict(_tmp_sd, strict=False)
404
- return pipe
405
-
406
- def prepare_extra_step_kwargs(self, generator, eta):
407
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
408
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
409
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
410
- # and should be between [0, 1]
411
-
412
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
413
- extra_step_kwargs = {}
414
- if accepts_eta:
415
- extra_step_kwargs["eta"] = eta
416
-
417
- # check if the scheduler accepts generator
418
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
419
- if accepts_generator:
420
- extra_step_kwargs["generator"] = generator
421
- return extra_step_kwargs
422
-
423
- def check_inputs(
424
- self,
425
- prompt,
426
- height,
427
- width,
428
- callback_steps,
429
- negative_prompt=None,
430
- prompt_embeds=None,
431
- negative_prompt_embeds=None,
432
- ):
433
- if height % 8 != 0 or width % 8 != 0:
434
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
435
-
436
- if (callback_steps is None) or (
437
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
438
- ):
439
- raise ValueError(
440
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
441
- f" {type(callback_steps)}."
442
- )
443
-
444
- if prompt is not None and prompt_embeds is not None:
445
- raise ValueError(
446
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
447
- " only forward one of the two."
448
- )
449
- elif prompt is None and prompt_embeds is None:
450
- raise ValueError(
451
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
452
- )
453
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
454
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
455
-
456
- if negative_prompt is not None and negative_prompt_embeds is not None:
457
- raise ValueError(
458
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
459
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
460
- )
461
-
462
- if prompt_embeds is not None and negative_prompt_embeds is not None:
463
- if prompt_embeds.shape != negative_prompt_embeds.shape:
464
- raise ValueError(
465
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
466
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
467
- f" {negative_prompt_embeds.shape}."
468
- )
469
-
470
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
471
- shape = (
472
- batch_size,
473
- num_channels_latents,
474
- int(height) // self.vae_scale_factor,
475
- int(width) // self.vae_scale_factor,
476
- )
477
- if isinstance(generator, list) and len(generator) != batch_size:
478
- raise ValueError(
479
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
480
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
481
- )
482
-
483
- if latents is None:
484
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
485
- else:
486
- latents = latents.to(device)
487
-
488
- # scale the initial noise by the standard deviation required by the scheduler
489
- latents = latents * self.scheduler.init_noise_sigma
490
- return latents
491
-
492
- @torch.no_grad()
493
- def __call__(
494
- self,
495
- prompt: Union[str, List[str]] = None,
496
- height: Optional[int] = None,
497
- width: Optional[int] = None,
498
- num_inference_steps: int = 50,
499
- guidance_scale: float = 7.5,
500
- negative_prompt: Optional[Union[str, List[str]]] = None,
501
- num_images_per_prompt: Optional[int] = 1,
502
- eta: float = 0.0,
503
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
504
- latents: Optional[torch.Tensor] = None,
505
- prompt_embeds: Optional[torch.Tensor] = None,
506
- negative_prompt_embeds: Optional[torch.Tensor] = None,
507
- output_type: Optional[str] = "pil",
508
- return_dict: bool = True,
509
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
510
- callback_steps: int = 1,
511
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
512
- guidance_rescale: float = 0.0,
513
- ):
514
- r"""
515
- The call function to the pipeline for generation.
516
-
517
- Args:
518
- prompt (`str` or `List[str]`, *optional*):
519
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
520
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
521
- The height in pixels of the generated image.
522
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
523
- The width in pixels of the generated image.
524
- num_inference_steps (`int`, *optional*, defaults to 50):
525
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
526
- expense of slower inference.
527
- guidance_scale (`float`, *optional*, defaults to 7.5):
528
- A higher guidance scale value encourages the model to generate images closely linked to the text
529
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
530
- negative_prompt (`str` or `List[str]`, *optional*):
531
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
532
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
533
- num_images_per_prompt (`int`, *optional*, defaults to 1):
534
- The number of images to generate per prompt.
535
- eta (`float`, *optional*, defaults to 0.0):
536
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
537
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
538
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
539
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
540
- generation deterministic.
541
- latents (`torch.Tensor`, *optional*):
542
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
543
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
544
- tensor is generated by sampling using the supplied random `generator`.
545
- prompt_embeds (`torch.Tensor`, *optional*):
546
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
547
- provided, text embeddings are generated from the `prompt` input argument.
548
- negative_prompt_embeds (`torch.Tensor`, *optional*):
549
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
550
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
551
- output_type (`str`, *optional*, defaults to `"pil"`):
552
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
553
- return_dict (`bool`, *optional*, defaults to `True`):
554
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
555
- plain tuple.
556
- callback (`Callable`, *optional*):
557
- A function that is called every `callback_steps` steps during inference. The function is called with the
558
- following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
559
- callback_steps (`int`, *optional*, defaults to 1):
560
- The frequency at which the `callback` function is called. If not specified, the callback is called at
561
- every step.
562
- cross_attention_kwargs (`dict`, *optional*):
563
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
564
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
565
- guidance_rescale (`float`, *optional*, defaults to 0.0):
566
- Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
567
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
568
- using zero terminal SNR.
569
-
570
- Examples:
571
-
572
- Returns:
573
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
574
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
575
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
576
- second element is a list of `bool`s indicating whether the corresponding generated image contains
577
- "not-safe-for-work" (nsfw) content.
578
- """
579
- # 0. Default height and width to unet
580
- height = height or self.unet.config.sample_size * self.vae_scale_factor
581
- width = width or self.unet.config.sample_size * self.vae_scale_factor
582
-
583
- # 1. Check inputs. Raise error if not correct
584
- self.check_inputs(
585
- prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
586
- )
587
-
588
- # 2. Define call parameters
589
- if prompt is not None and isinstance(prompt, str):
590
- batch_size = 1
591
- elif prompt is not None and isinstance(prompt, list):
592
- batch_size = len(prompt)
593
- else:
594
- batch_size = prompt_embeds.shape[0]
595
-
596
- device = self._execution_device
597
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
598
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
599
- # corresponds to doing no classifier free guidance.
600
- do_classifier_free_guidance = guidance_scale > 1.0
601
-
602
- # 3. Encode input prompt
603
- text_encoder_lora_scale = (
604
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
605
- )
606
- prompt_embeds, negative_prompt_embeds = self.encode_prompt(
607
- prompt,
608
- device,
609
- num_images_per_prompt,
610
- do_classifier_free_guidance,
611
- negative_prompt,
612
- prompt_embeds=prompt_embeds,
613
- negative_prompt_embeds=negative_prompt_embeds,
614
- lora_scale=text_encoder_lora_scale,
615
- )
616
- # For classifier free guidance, we need to do two forward passes.
617
- # Here we concatenate the unconditional and text embeddings into a single batch
618
- # to avoid doing two forward passes
619
- if do_classifier_free_guidance:
620
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
621
-
622
- # 4. Prepare timesteps
623
- timesteps = [(1.0 - i / num_inference_steps) * 1000.0 for i in range(num_inference_steps)]
624
-
625
- # 5. Prepare latent variables
626
- num_channels_latents = self.unet.config.in_channels
627
- latents = self.prepare_latents(
628
- batch_size * num_images_per_prompt,
629
- num_channels_latents,
630
- height,
631
- width,
632
- prompt_embeds.dtype,
633
- device,
634
- generator,
635
- latents,
636
- )
637
-
638
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
639
- dt = 1.0 / num_inference_steps
640
-
641
- # 7. Denoising loop of Euler discretization from t = 0 to t = 1
642
- with self.progress_bar(total=num_inference_steps) as progress_bar:
643
- for i, t in enumerate(timesteps):
644
- # expand the latents if we are doing classifier free guidance
645
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
646
-
647
- vec_t = torch.ones((latent_model_input.shape[0],), device=latents.device) * t
648
-
649
- v_pred = self.unet(latent_model_input, vec_t, encoder_hidden_states=prompt_embeds).sample
650
-
651
- # perform guidance
652
- if do_classifier_free_guidance:
653
- v_pred_neg, v_pred_text = v_pred.chunk(2)
654
- v_pred = v_pred_neg + guidance_scale * (v_pred_text - v_pred_neg)
655
-
656
- latents = latents + dt * v_pred
657
-
658
- # call the callback, if provided
659
- if i == len(timesteps) - 1 or ((i + 1) % self.scheduler.order == 0):
660
- progress_bar.update()
661
- if callback is not None and i % callback_steps == 0:
662
- step_idx = i // getattr(self.scheduler, "order", 1)
663
- callback(step_idx, t, latents)
664
-
665
- if not output_type == "latent":
666
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
667
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
668
- else:
669
- image = latents
670
- has_nsfw_concept = None
671
-
672
- if has_nsfw_concept is None:
673
- do_denormalize = [True] * image.shape[0]
674
- else:
675
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
676
-
677
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
678
-
679
- # Offload all models
680
- self.maybe_free_model_hooks()
681
-
682
- if not return_dict:
683
- return (image, has_nsfw_concept)
684
-
685
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
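The `__call__` above does not step a scheduler; it integrates the UNet's velocity prediction with a fixed-step Euler update (`latents = latents + dt * v_pred`). Below is a minimal sketch of that update rule only, using a toy velocity model in place of the UNet; the model and tensor shapes are illustrative assumptions, not part of this file.

# Minimal sketch of the Euler update used in the denoising loop above.
# The toy "velocity model" stands in for the UNet and is purely illustrative.
import torch

def euler_sample(velocity_model, latents, num_inference_steps=4):
    # timesteps run from 1000 toward 0, matching the loop above
    timesteps = [(1.0 - i / num_inference_steps) * 1000.0 for i in range(num_inference_steps)]
    dt = 1.0 / num_inference_steps
    for t in timesteps:
        vec_t = torch.full((latents.shape[0],), t, device=latents.device)
        latents = latents + dt * velocity_model(latents, vec_t)  # explicit Euler step
    return latents

toy_model = lambda x, t: -x  # toy field that pulls latents toward zero
samples = euler_sample(toy_model, torch.randn(1, 4, 8, 8))
print(samples.shape)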
interpolate_stable_diffusion.py DELETED
@@ -1,498 +0,0 @@
1
- import inspect
2
- import time
3
- from pathlib import Path
4
- from typing import Callable, List, Optional, Union
5
-
6
- import numpy as np
7
- import torch
8
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
9
-
10
- from diffusers.configuration_utils import FrozenDict
11
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
13
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
14
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
15
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
16
- from diffusers.utils import deprecate, logging
17
-
18
-
19
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
20
-
21
-
22
- def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
23
- """Helper function to spherically interpolate between two arrays v0 and v1."""
24
- inputs_are_torch = False  # ensure the flag is defined when the inputs are already numpy arrays
25
- if not isinstance(v0, np.ndarray):
26
- inputs_are_torch = True
27
- input_device = v0.device
28
- v0 = v0.cpu().numpy()
29
- v1 = v1.cpu().numpy()
30
-
31
- dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
32
- if np.abs(dot) > DOT_THRESHOLD:
33
- v2 = (1 - t) * v0 + t * v1
34
- else:
35
- theta_0 = np.arccos(dot)
36
- sin_theta_0 = np.sin(theta_0)
37
- theta_t = theta_0 * t
38
- sin_theta_t = np.sin(theta_t)
39
- s0 = np.sin(theta_0 - theta_t) / sin_theta_0
40
- s1 = sin_theta_t / sin_theta_0
41
- v2 = s0 * v0 + s1 * v1
42
-
43
- if inputs_are_torch:
44
- v2 = torch.from_numpy(v2).to(input_device)
45
-
46
- return v2
47
-
48
-
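As defined above, `slerp` interpolates along the great circle between `v0` and `v1`, falling back to linear interpolation when the inputs are nearly collinear (`|dot| > DOT_THRESHOLD`). A quick sketch of calling it, assuming the `slerp` definition and `torch` import above; shapes are illustrative only:

a = torch.randn(1, 4, 64, 64)
b = torch.randn(1, 4, 64, 64)
midpoint = slerp(0.5, a, b)  # t = 0.5: halfway along the spherical path between a and b
print(midpoint.shape)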
49
- class StableDiffusionWalkPipeline(DiffusionPipeline, StableDiffusionMixin):
50
- r"""
51
- Pipeline for text-to-image generation using Stable Diffusion.
52
-
53
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
54
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
55
-
56
- Args:
57
- vae ([`AutoencoderKL`]):
58
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
59
- text_encoder ([`CLIPTextModel`]):
60
- Frozen text-encoder. Stable Diffusion uses the text portion of
61
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
62
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
63
- tokenizer (`CLIPTokenizer`):
64
- Tokenizer of class
65
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
66
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
67
- scheduler ([`SchedulerMixin`]):
68
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
69
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
70
- safety_checker ([`StableDiffusionSafetyChecker`]):
71
- Classification module that estimates whether generated images could be considered offensive or harmful.
72
- Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
73
- feature_extractor ([`CLIPImageProcessor`]):
74
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
75
- """
76
-
77
- def __init__(
78
- self,
79
- vae: AutoencoderKL,
80
- text_encoder: CLIPTextModel,
81
- tokenizer: CLIPTokenizer,
82
- unet: UNet2DConditionModel,
83
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
84
- safety_checker: StableDiffusionSafetyChecker,
85
- feature_extractor: CLIPImageProcessor,
86
- ):
87
- super().__init__()
88
-
89
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
90
- deprecation_message = (
91
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
92
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
93
- "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
94
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
95
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
96
- " file"
97
- )
98
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
99
- new_config = dict(scheduler.config)
100
- new_config["steps_offset"] = 1
101
- scheduler._internal_dict = FrozenDict(new_config)
102
-
103
- if safety_checker is None:
104
- logger.warning(
105
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
106
- " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
107
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
108
- " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
109
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
110
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
111
- )
112
-
113
- self.register_modules(
114
- vae=vae,
115
- text_encoder=text_encoder,
116
- tokenizer=tokenizer,
117
- unet=unet,
118
- scheduler=scheduler,
119
- safety_checker=safety_checker,
120
- feature_extractor=feature_extractor,
121
- )
122
-
123
- @torch.no_grad()
124
- def __call__(
125
- self,
126
- prompt: Optional[Union[str, List[str]]] = None,
127
- height: int = 512,
128
- width: int = 512,
129
- num_inference_steps: int = 50,
130
- guidance_scale: float = 7.5,
131
- negative_prompt: Optional[Union[str, List[str]]] = None,
132
- num_images_per_prompt: Optional[int] = 1,
133
- eta: float = 0.0,
134
- generator: Optional[torch.Generator] = None,
135
- latents: Optional[torch.Tensor] = None,
136
- output_type: Optional[str] = "pil",
137
- return_dict: bool = True,
138
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
139
- callback_steps: int = 1,
140
- text_embeddings: Optional[torch.Tensor] = None,
141
- **kwargs,
142
- ):
143
- r"""
144
- Function invoked when calling the pipeline for generation.
145
-
146
- Args:
147
- prompt (`str` or `List[str]`, *optional*, defaults to `None`):
148
- The prompt or prompts to guide the image generation. If not provided, `text_embeddings` is required.
149
- height (`int`, *optional*, defaults to 512):
150
- The height in pixels of the generated image.
151
- width (`int`, *optional*, defaults to 512):
152
- The width in pixels of the generated image.
153
- num_inference_steps (`int`, *optional*, defaults to 50):
154
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
155
- expense of slower inference.
156
- guidance_scale (`float`, *optional*, defaults to 7.5):
157
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
158
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
159
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
160
- 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
161
- usually at the expense of lower image quality.
162
- negative_prompt (`str` or `List[str]`, *optional*):
163
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
164
- if `guidance_scale` is less than `1`).
165
- num_images_per_prompt (`int`, *optional*, defaults to 1):
166
- The number of images to generate per prompt.
167
- eta (`float`, *optional*, defaults to 0.0):
168
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
169
- [`schedulers.DDIMScheduler`], will be ignored for others.
170
- generator (`torch.Generator`, *optional*):
171
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
172
- deterministic.
173
- latents (`torch.Tensor`, *optional*):
174
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
175
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
176
- tensor will be generated by sampling using the supplied random `generator`.
177
- output_type (`str`, *optional*, defaults to `"pil"`):
178
- The output format of the generated image. Choose between
179
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
180
- return_dict (`bool`, *optional*, defaults to `True`):
181
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
182
- plain tuple.
183
- callback (`Callable`, *optional*):
184
- A function that will be called every `callback_steps` steps during inference. The function will be
185
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
186
- callback_steps (`int`, *optional*, defaults to 1):
187
- The frequency at which the `callback` function will be called. If not specified, the callback will be
188
- called at every step.
189
- text_embeddings (`torch.Tensor`, *optional*, defaults to `None`):
190
- Pre-generated text embeddings to be used as inputs for image generation. Can be used in place of
191
- `prompt` to avoid re-computing the embeddings. If not provided, the embeddings will be generated from
192
- the supplied `prompt`.
193
-
194
- Returns:
195
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
196
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, otherwise a `tuple`.
197
- When returning a tuple, the first element is a list with the generated images, and the second element is a
198
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
199
- (nsfw) content, according to the `safety_checker`.
200
- """
201
-
202
- if height % 8 != 0 or width % 8 != 0:
203
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
204
-
205
- if (callback_steps is None) or (
206
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
207
- ):
208
- raise ValueError(
209
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
210
- f" {type(callback_steps)}."
211
- )
212
-
213
- if text_embeddings is None:
214
- if isinstance(prompt, str):
215
- batch_size = 1
216
- elif isinstance(prompt, list):
217
- batch_size = len(prompt)
218
- else:
219
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
220
-
221
- # get prompt text embeddings
222
- text_inputs = self.tokenizer(
223
- prompt,
224
- padding="max_length",
225
- max_length=self.tokenizer.model_max_length,
226
- return_tensors="pt",
227
- )
228
- text_input_ids = text_inputs.input_ids
229
-
230
- if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
231
- removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
232
- print(
233
- "The following part of your input was truncated because CLIP can only handle sequences up to"
234
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
235
- )
236
- text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
237
- text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
238
- else:
239
- batch_size = text_embeddings.shape[0]
240
-
241
- # duplicate text embeddings for each generation per prompt, using mps friendly method
242
- bs_embed, seq_len, _ = text_embeddings.shape
243
- text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
244
- text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
245
-
246
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
247
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
248
- # corresponds to doing no classifier free guidance.
249
- do_classifier_free_guidance = guidance_scale > 1.0
250
- # get unconditional embeddings for classifier free guidance
251
- if do_classifier_free_guidance:
252
- uncond_tokens: List[str]
253
- if negative_prompt is None:
254
- uncond_tokens = [""] * batch_size
255
- elif type(prompt) is not type(negative_prompt):
256
- raise TypeError(
257
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
258
- f" {type(prompt)}."
259
- )
260
- elif isinstance(negative_prompt, str):
261
- uncond_tokens = [negative_prompt]
262
- elif batch_size != len(negative_prompt):
263
- raise ValueError(
264
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
265
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
266
- " the batch size of `prompt`."
267
- )
268
- else:
269
- uncond_tokens = negative_prompt
270
-
271
- max_length = self.tokenizer.model_max_length
272
- uncond_input = self.tokenizer(
273
- uncond_tokens,
274
- padding="max_length",
275
- max_length=max_length,
276
- truncation=True,
277
- return_tensors="pt",
278
- )
279
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
280
-
281
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
282
- seq_len = uncond_embeddings.shape[1]
283
- uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
284
- uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
285
-
286
- # For classifier free guidance, we need to do two forward passes.
287
- # Here we concatenate the unconditional and text embeddings into a single batch
288
- # to avoid doing two forward passes
289
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
290
-
291
- # get the initial random noise unless the user supplied it
292
-
293
- # Unlike in other pipelines, latents need to be generated in the target device
294
- # for 1-to-1 results reproducibility with the CompVis implementation.
295
- # However this currently doesn't work in `mps`.
296
- latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
297
- latents_dtype = text_embeddings.dtype
298
- if latents is None:
299
- if self.device.type == "mps":
300
- # randn does not work reproducibly on mps
301
- latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
302
- self.device
303
- )
304
- else:
305
- latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
306
- else:
307
- if latents.shape != latents_shape:
308
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
309
- latents = latents.to(self.device)
310
-
311
- # set timesteps
312
- self.scheduler.set_timesteps(num_inference_steps)
313
-
314
- # Some schedulers like PNDM have timesteps as arrays
315
- # It's more optimized to move all timesteps to correct device beforehand
316
- timesteps_tensor = self.scheduler.timesteps.to(self.device)
317
-
318
- # scale the initial noise by the standard deviation required by the scheduler
319
- latents = latents * self.scheduler.init_noise_sigma
320
-
321
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
322
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
323
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
324
- # and should be between [0, 1]
325
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
326
- extra_step_kwargs = {}
327
- if accepts_eta:
328
- extra_step_kwargs["eta"] = eta
329
-
330
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
331
- # expand the latents if we are doing classifier free guidance
332
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
333
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
334
-
335
- # predict the noise residual
336
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
337
-
338
- # perform guidance
339
- if do_classifier_free_guidance:
340
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
341
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
342
-
343
- # compute the previous noisy sample x_t -> x_t-1
344
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
345
-
346
- # call the callback, if provided
347
- if callback is not None and i % callback_steps == 0:
348
- step_idx = i // getattr(self.scheduler, "order", 1)
349
- callback(step_idx, t, latents)
350
-
351
- latents = 1 / 0.18215 * latents
352
- image = self.vae.decode(latents).sample
353
-
354
- image = (image / 2 + 0.5).clamp(0, 1)
355
-
356
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
357
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
358
-
359
- if self.safety_checker is not None:
360
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
361
- self.device
362
- )
363
- image, has_nsfw_concept = self.safety_checker(
364
- images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
365
- )
366
- else:
367
- has_nsfw_concept = None
368
-
369
- if output_type == "pil":
370
- image = self.numpy_to_pil(image)
371
-
372
- if not return_dict:
373
- return (image, has_nsfw_concept)
374
-
375
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
376
-
377
- def embed_text(self, text):
378
- """takes in text and turns it into text embeddings"""
379
- text_input = self.tokenizer(
380
- text,
381
- padding="max_length",
382
- max_length=self.tokenizer.model_max_length,
383
- truncation=True,
384
- return_tensors="pt",
385
- )
386
- with torch.no_grad():
387
- embed = self.text_encoder(text_input.input_ids.to(self.device))[0]
388
- return embed
389
-
390
- def get_noise(self, seed, dtype=torch.float32, height=512, width=512):
391
- """Takes in random seed and returns corresponding noise vector"""
392
- return torch.randn(
393
- (1, self.unet.config.in_channels, height // 8, width // 8),
394
- generator=torch.Generator(device=self.device).manual_seed(seed),
395
- device=self.device,
396
- dtype=dtype,
397
- )
398
-
399
- def walk(
400
- self,
401
- prompts: List[str],
402
- seeds: List[int],
403
- num_interpolation_steps: Optional[int] = 6,
404
- output_dir: Optional[str] = "./dreams",
405
- name: Optional[str] = None,
406
- batch_size: Optional[int] = 1,
407
- height: Optional[int] = 512,
408
- width: Optional[int] = 512,
409
- guidance_scale: Optional[float] = 7.5,
410
- num_inference_steps: Optional[int] = 50,
411
- eta: Optional[float] = 0.0,
412
- ) -> List[str]:
413
- """
414
- Walks through a series of prompts and seeds, interpolating between them and saving the results to disk.
415
-
416
- Args:
417
- prompts (`List[str]`):
418
- List of prompts to generate images for.
419
- seeds (`List[int]`):
420
- List of seeds corresponding to provided prompts. Must be the same length as prompts.
421
- num_interpolation_steps (`int`, *optional*, defaults to 6):
422
- Number of interpolation steps to take between prompts.
423
- output_dir (`str`, *optional*, defaults to `./dreams`):
424
- Directory to save the generated images to.
425
- name (`str`, *optional*, defaults to `None`):
426
- Subdirectory of `output_dir` to save the generated images to. If `None`, the name will
427
- be the current time.
428
- batch_size (`int`, *optional*, defaults to 1):
429
- Number of images to generate at once.
430
- height (`int`, *optional*, defaults to 512):
431
- Height of the generated images.
432
- width (`int`, *optional*, defaults to 512):
433
- Width of the generated images.
434
- guidance_scale (`float`, *optional*, defaults to 7.5):
435
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
436
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
437
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
438
- 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
439
- usually at the expense of lower image quality.
440
- num_inference_steps (`int`, *optional*, defaults to 50):
441
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
442
- expense of slower inference.
443
- eta (`float`, *optional*, defaults to 0.0):
444
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
445
- [`schedulers.DDIMScheduler`], will be ignored for others.
446
-
447
- Returns:
448
- `List[str]`: List of paths to the generated images.
449
- """
450
- if not len(prompts) == len(seeds):
451
- raise ValueError(
452
- f"Number of prompts and seeds must be equal. Got {len(prompts)} prompts and {len(seeds)} seeds."
453
- )
454
-
455
- name = name or time.strftime("%Y%m%d-%H%M%S")
456
- save_path = Path(output_dir) / name
457
- save_path.mkdir(exist_ok=True, parents=True)
458
-
459
- frame_idx = 0
460
- frame_filepaths = []
461
- for prompt_a, prompt_b, seed_a, seed_b in zip(prompts, prompts[1:], seeds, seeds[1:]):
462
- # Embed Text
463
- embed_a = self.embed_text(prompt_a)
464
- embed_b = self.embed_text(prompt_b)
465
-
466
- # Get Noise
467
- noise_dtype = embed_a.dtype
468
- noise_a = self.get_noise(seed_a, noise_dtype, height, width)
469
- noise_b = self.get_noise(seed_b, noise_dtype, height, width)
470
-
471
- noise_batch, embeds_batch = None, None
472
- T = np.linspace(0.0, 1.0, num_interpolation_steps)
473
- for i, t in enumerate(T):
474
- noise = slerp(float(t), noise_a, noise_b)
475
- embed = torch.lerp(embed_a, embed_b, t)
476
-
477
- noise_batch = noise if noise_batch is None else torch.cat([noise_batch, noise], dim=0)
478
- embeds_batch = embed if embeds_batch is None else torch.cat([embeds_batch, embed], dim=0)
479
-
480
- batch_is_ready = embeds_batch.shape[0] == batch_size or i + 1 == T.shape[0]
481
- if batch_is_ready:
482
- outputs = self(
483
- latents=noise_batch,
484
- text_embeddings=embeds_batch,
485
- height=height,
486
- width=width,
487
- guidance_scale=guidance_scale,
488
- eta=eta,
489
- num_inference_steps=num_inference_steps,
490
- )
491
- noise_batch, embeds_batch = None, None
492
-
493
- for image in outputs["images"]:
494
- frame_filepath = str(save_path / f"frame_{frame_idx:06d}.png")
495
- image.save(frame_filepath)
496
- frame_filepaths.append(frame_filepath)
497
- frame_idx += 1
498
- return frame_filepaths
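A hedged usage sketch for the `walk()` method above, assuming this file remains loadable as a diffusers community pipeline; the base model id, prompts, and seeds are illustrative:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="interpolate_stable_diffusion",  # assumed community-pipeline name for this file
    torch_dtype=torch.float16,
).to("cuda")

frame_paths = pipe.walk(
    prompts=["a photo of a dog", "a photo of a cat"],
    seeds=[42, 1337],
    num_interpolation_steps=6,
    output_dir="./dreams",
    guidance_scale=7.5,
    num_inference_steps=50,
)
print(len(frame_paths), "frames written")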
ip_adapter_face_id.py DELETED
@@ -1,1125 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Any, Callable, Dict, List, Optional, Union
17
-
18
- import torch
19
- import torch.nn as nn
20
- import torch.nn.functional as F
21
- from packaging import version
22
- from safetensors import safe_open
23
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
24
-
25
- from diffusers.configuration_utils import FrozenDict
26
- from diffusers.image_processor import VaeImageProcessor
27
- from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
28
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
29
- from diffusers.models.attention_processor import (
30
- AttnProcessor,
31
- AttnProcessor2_0,
32
- IPAdapterAttnProcessor,
33
- IPAdapterAttnProcessor2_0,
34
- )
35
- from diffusers.models.embeddings import MultiIPAdapterImageProjection
36
- from diffusers.models.lora import adjust_lora_scale_text_encoder
37
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
38
- from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
39
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
40
- from diffusers.schedulers import KarrasDiffusionSchedulers
41
- from diffusers.utils import (
42
- USE_PEFT_BACKEND,
43
- _get_model_file,
44
- deprecate,
45
- logging,
46
- scale_lora_layers,
47
- unscale_lora_layers,
48
- )
49
- from diffusers.utils.torch_utils import randn_tensor
50
-
51
-
52
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
53
-
54
-
55
- class IPAdapterFullImageProjection(nn.Module):
56
- def __init__(self, image_embed_dim=1024, cross_attention_dim=1024, mult=1, num_tokens=1):
57
- super().__init__()
58
- from diffusers.models.attention import FeedForward
59
-
60
- self.num_tokens = num_tokens
61
- self.cross_attention_dim = cross_attention_dim
62
- self.ff = FeedForward(image_embed_dim, cross_attention_dim * num_tokens, mult=mult, activation_fn="gelu")
63
- self.norm = nn.LayerNorm(cross_attention_dim)
64
-
65
- def forward(self, image_embeds: torch.Tensor):
66
- x = self.ff(image_embeds)
67
- x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
68
- return self.norm(x)
69
-
70
-
71
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
72
- """
73
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
74
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
75
- """
76
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
77
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
78
- # rescale the results from guidance (fixes overexposure)
79
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
80
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
81
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
82
- return noise_cfg
83
-
84
-
85
- def retrieve_timesteps(
86
- scheduler,
87
- num_inference_steps: Optional[int] = None,
88
- device: Optional[Union[str, torch.device]] = None,
89
- timesteps: Optional[List[int]] = None,
90
- **kwargs,
91
- ):
92
- """
93
- Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
94
- custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
95
-
96
- Args:
97
- scheduler (`SchedulerMixin`):
98
- The scheduler to get timesteps from.
99
- num_inference_steps (`int`):
100
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
101
- `timesteps` must be `None`.
102
- device (`str` or `torch.device`, *optional*):
103
- The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
104
- timesteps (`List[int]`, *optional*):
105
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
106
- timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
107
- must be `None`.
108
-
109
- Returns:
110
- `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
111
- second element is the number of inference steps.
112
- """
113
- if timesteps is not None:
114
- accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
115
- if not accepts_timesteps:
116
- raise ValueError(
117
- f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
118
- f" timestep schedules. Please check whether you are using the correct scheduler."
119
- )
120
- scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
121
- timesteps = scheduler.timesteps
122
- num_inference_steps = len(timesteps)
123
- else:
124
- scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
125
- timesteps = scheduler.timesteps
126
- return timesteps, num_inference_steps
127
-
128
-
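`retrieve_timesteps` above accepts either a step count or an explicit `timesteps` list, never both. A hedged sketch of the two call patterns, assuming the helper above and an illustrative scheduler:

from diffusers import DDIMScheduler

scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")

# Default spacing: let the scheduler lay out 30 steps.
timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=30, device="cpu")

# Custom spacing: pass an explicit descending schedule instead (only valid for schedulers whose
# `set_timesteps` accepts a `timesteps` argument; otherwise the helper above raises a ValueError).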
129
- class IPAdapterFaceIDStableDiffusionPipeline(
130
- DiffusionPipeline,
131
- StableDiffusionMixin,
132
- TextualInversionLoaderMixin,
133
- LoraLoaderMixin,
134
- IPAdapterMixin,
135
- FromSingleFileMixin,
136
- ):
137
- r"""
138
- Pipeline for text-to-image generation using Stable Diffusion.
139
-
140
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
141
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
142
-
143
- The pipeline also inherits the following loading methods:
144
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
145
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
146
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
147
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
148
- - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
149
-
150
- Args:
151
- vae ([`AutoencoderKL`]):
152
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
153
- text_encoder ([`~transformers.CLIPTextModel`]):
154
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
155
- tokenizer ([`~transformers.CLIPTokenizer`]):
156
- A `CLIPTokenizer` to tokenize text.
157
- unet ([`UNet2DConditionModel`]):
158
- A `UNet2DConditionModel` to denoise the encoded image latents.
159
- scheduler ([`SchedulerMixin`]):
160
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
161
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
162
- safety_checker ([`StableDiffusionSafetyChecker`]):
163
- Classification module that estimates whether generated images could be considered offensive or harmful.
164
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
165
- about a model's potential harms.
166
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
167
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
168
- """
169
-
170
- model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
171
- _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
172
- _exclude_from_cpu_offload = ["safety_checker"]
173
- _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
174
-
175
- def __init__(
176
- self,
177
- vae: AutoencoderKL,
178
- text_encoder: CLIPTextModel,
179
- tokenizer: CLIPTokenizer,
180
- unet: UNet2DConditionModel,
181
- scheduler: KarrasDiffusionSchedulers,
182
- safety_checker: StableDiffusionSafetyChecker,
183
- feature_extractor: CLIPImageProcessor,
184
- image_encoder: CLIPVisionModelWithProjection = None,
185
- requires_safety_checker: bool = True,
186
- ):
187
- super().__init__()
188
-
189
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
190
- deprecation_message = (
191
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
192
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
193
- "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
194
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
195
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
196
- " file"
197
- )
198
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
199
- new_config = dict(scheduler.config)
200
- new_config["steps_offset"] = 1
201
- scheduler._internal_dict = FrozenDict(new_config)
202
-
203
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
204
- deprecation_message = (
205
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
206
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
207
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
208
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
209
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
210
- )
211
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
212
- new_config = dict(scheduler.config)
213
- new_config["clip_sample"] = False
214
- scheduler._internal_dict = FrozenDict(new_config)
215
-
216
- if safety_checker is None and requires_safety_checker:
217
- logger.warning(
218
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
219
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
220
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
221
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
222
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
223
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
224
- )
225
-
226
- if safety_checker is not None and feature_extractor is None:
227
- raise ValueError(
228
- f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
229
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
230
- )
231
-
232
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
233
- version.parse(unet.config._diffusers_version).base_version
234
- ) < version.parse("0.9.0.dev0")
235
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
236
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
237
- deprecation_message = (
238
- "The configuration file of the unet has set the default `sample_size` to smaller than"
239
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
240
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
241
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
242
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
243
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
244
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
245
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
246
- " the `unet/config.json` file"
247
- )
248
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
249
- new_config = dict(unet.config)
250
- new_config["sample_size"] = 64
251
- unet._internal_dict = FrozenDict(new_config)
252
-
253
- self.register_modules(
254
- vae=vae,
255
- text_encoder=text_encoder,
256
- tokenizer=tokenizer,
257
- unet=unet,
258
- scheduler=scheduler,
259
- safety_checker=safety_checker,
260
- feature_extractor=feature_extractor,
261
- image_encoder=image_encoder,
262
- )
263
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
264
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
265
- self.register_to_config(requires_safety_checker=requires_safety_checker)
266
-
267
- def load_ip_adapter_face_id(self, pretrained_model_name_or_path_or_dict, weight_name, **kwargs):
268
- cache_dir = kwargs.pop("cache_dir", None)
269
- force_download = kwargs.pop("force_download", False)
270
- resume_download = kwargs.pop("resume_download", False)
271
- proxies = kwargs.pop("proxies", None)
272
- local_files_only = kwargs.pop("local_files_only", None)
273
- token = kwargs.pop("token", None)
274
- revision = kwargs.pop("revision", None)
275
- subfolder = kwargs.pop("subfolder", None)
276
-
277
- user_agent = {
278
- "file_type": "attn_procs_weights",
279
- "framework": "pytorch",
280
- }
281
- model_file = _get_model_file(
282
- pretrained_model_name_or_path_or_dict,
283
- weights_name=weight_name,
284
- cache_dir=cache_dir,
285
- force_download=force_download,
286
- resume_download=resume_download,
287
- proxies=proxies,
288
- local_files_only=local_files_only,
289
- token=token,
290
- revision=revision,
291
- subfolder=subfolder,
292
- user_agent=user_agent,
293
- )
294
- if weight_name.endswith(".safetensors"):
295
- state_dict = {"image_proj": {}, "ip_adapter": {}}
296
- with safe_open(model_file, framework="pt", device="cpu") as f:
297
- for key in f.keys():
298
- if key.startswith("image_proj."):
299
- state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
300
- elif key.startswith("ip_adapter."):
301
- state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
302
- else:
303
- state_dict = torch.load(model_file, map_location="cpu")
304
- self._load_ip_adapter_weights(state_dict)
305
-
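A hedged sketch of how `load_ip_adapter_face_id` above is typically invoked on an already-constructed `IPAdapterFaceIDStableDiffusionPipeline` (named `pipe` here); the checkpoint repository and weight filename are illustrative assumptions and should be checked against the FaceID weights actually in use:

# Illustrative only: repository id and weight name are assumptions, not taken from this file.
pipe.load_ip_adapter_face_id("h94/IP-Adapter-FaceID", "ip-adapter-faceid_sd15.bin")
pipe.set_ip_adapter_scale(0.7)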
306
- def convert_ip_adapter_image_proj_to_diffusers(self, state_dict):
307
- updated_state_dict = {}
308
- clip_embeddings_dim_in = state_dict["proj.0.weight"].shape[1]
309
- clip_embeddings_dim_out = state_dict["proj.0.weight"].shape[0]
310
- multiplier = clip_embeddings_dim_out // clip_embeddings_dim_in
311
- norm_layer = "norm.weight"
312
- cross_attention_dim = state_dict[norm_layer].shape[0]
313
- num_tokens = state_dict["proj.2.weight"].shape[0] // cross_attention_dim
314
-
315
- image_projection = IPAdapterFullImageProjection(
316
- cross_attention_dim=cross_attention_dim,
317
- image_embed_dim=clip_embeddings_dim_in,
318
- mult=multiplier,
319
- num_tokens=num_tokens,
320
- )
321
-
322
- for key, value in state_dict.items():
323
- diffusers_name = key.replace("proj.0", "ff.net.0.proj")
324
- diffusers_name = diffusers_name.replace("proj.2", "ff.net.2")
325
- updated_state_dict[diffusers_name] = value
326
-
327
- image_projection.load_state_dict(updated_state_dict)
328
- return image_projection
329
-
330
- def _load_ip_adapter_weights(self, state_dict):
331
- num_image_text_embeds = 4
332
-
333
- self.unet.encoder_hid_proj = None
334
-
335
- # set ip-adapter cross-attention processors & load state_dict
336
- attn_procs = {}
337
- lora_dict = {}
338
- key_id = 0
339
- for name in self.unet.attn_processors.keys():
340
- cross_attention_dim = None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim
341
- if name.startswith("mid_block"):
342
- hidden_size = self.unet.config.block_out_channels[-1]
343
- elif name.startswith("up_blocks"):
344
- block_id = int(name[len("up_blocks.")])
345
- hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id]
346
- elif name.startswith("down_blocks"):
347
- block_id = int(name[len("down_blocks.")])
348
- hidden_size = self.unet.config.block_out_channels[block_id]
349
- if cross_attention_dim is None or "motion_modules" in name:
350
- attn_processor_class = (
351
- AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor
352
- )
353
- attn_procs[name] = attn_processor_class()
354
-
355
- lora_dict.update(
356
- {f"unet.{name}.to_k_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.down.weight"]}
357
- )
358
- lora_dict.update(
359
- {f"unet.{name}.to_q_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.down.weight"]}
360
- )
361
- lora_dict.update(
362
- {f"unet.{name}.to_v_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.down.weight"]}
363
- )
364
- lora_dict.update(
365
- {
366
- f"unet.{name}.to_out_lora.down.weight": state_dict["ip_adapter"][
367
- f"{key_id}.to_out_lora.down.weight"
368
- ]
369
- }
370
- )
371
- lora_dict.update(
372
- {f"unet.{name}.to_k_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.up.weight"]}
373
- )
374
- lora_dict.update(
375
- {f"unet.{name}.to_q_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.up.weight"]}
376
- )
377
- lora_dict.update(
378
- {f"unet.{name}.to_v_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.up.weight"]}
379
- )
380
- lora_dict.update(
381
- {f"unet.{name}.to_out_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_out_lora.up.weight"]}
382
- )
383
- key_id += 1
384
- else:
385
- attn_processor_class = (
386
- IPAdapterAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else IPAdapterAttnProcessor
387
- )
388
- attn_procs[name] = attn_processor_class(
389
- hidden_size=hidden_size,
390
- cross_attention_dim=cross_attention_dim,
391
- scale=1.0,
392
- num_tokens=num_image_text_embeds,
393
- ).to(dtype=self.dtype, device=self.device)
394
-
395
- lora_dict.update(
396
- {f"unet.{name}.to_k_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.down.weight"]}
397
- )
398
- lora_dict.update(
399
- {f"unet.{name}.to_q_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.down.weight"]}
400
- )
401
- lora_dict.update(
402
- {f"unet.{name}.to_v_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.down.weight"]}
403
- )
404
- lora_dict.update(
405
- {
406
- f"unet.{name}.to_out_lora.down.weight": state_dict["ip_adapter"][
407
- f"{key_id}.to_out_lora.down.weight"
408
- ]
409
- }
410
- )
411
- lora_dict.update(
412
- {f"unet.{name}.to_k_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.up.weight"]}
413
- )
414
- lora_dict.update(
415
- {f"unet.{name}.to_q_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.up.weight"]}
416
- )
417
- lora_dict.update(
418
- {f"unet.{name}.to_v_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.up.weight"]}
419
- )
420
- lora_dict.update(
421
- {f"unet.{name}.to_out_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_out_lora.up.weight"]}
422
- )
423
-
424
- value_dict = {}
425
- value_dict.update({"to_k_ip.0.weight": state_dict["ip_adapter"][f"{key_id}.to_k_ip.weight"]})
426
- value_dict.update({"to_v_ip.0.weight": state_dict["ip_adapter"][f"{key_id}.to_v_ip.weight"]})
427
- attn_procs[name].load_state_dict(value_dict)
428
- key_id += 1
429
-
430
- self.unet.set_attn_processor(attn_procs)
431
-
432
- self.load_lora_weights(lora_dict, adapter_name="faceid")
433
- self.set_adapters(["faceid"], adapter_weights=[1.0])
434
-
435
- # convert IP-Adapter Image Projection layers to diffusers
436
- image_projection = self.convert_ip_adapter_image_proj_to_diffusers(state_dict["image_proj"])
437
- image_projection_layers = [image_projection.to(device=self.device, dtype=self.dtype)]
438
-
439
- self.unet.encoder_hid_proj = MultiIPAdapterImageProjection(image_projection_layers)
440
- self.unet.config.encoder_hid_dim_type = "ip_image_proj"
441
-
442
- def set_ip_adapter_scale(self, scale):
443
- unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
444
- for attn_processor in unet.attn_processors.values():
445
- if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
446
- attn_processor.scale = [scale]
447
-
448
- def _encode_prompt(
449
- self,
450
- prompt,
451
- device,
452
- num_images_per_prompt,
453
- do_classifier_free_guidance,
454
- negative_prompt=None,
455
- prompt_embeds: Optional[torch.Tensor] = None,
456
- negative_prompt_embeds: Optional[torch.Tensor] = None,
457
- lora_scale: Optional[float] = None,
458
- **kwargs,
459
- ):
460
- deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
461
- deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
462
-
463
- prompt_embeds_tuple = self.encode_prompt(
464
- prompt=prompt,
465
- device=device,
466
- num_images_per_prompt=num_images_per_prompt,
467
- do_classifier_free_guidance=do_classifier_free_guidance,
468
- negative_prompt=negative_prompt,
469
- prompt_embeds=prompt_embeds,
470
- negative_prompt_embeds=negative_prompt_embeds,
471
- lora_scale=lora_scale,
472
- **kwargs,
473
- )
474
-
475
- # concatenate for backwards comp
476
- prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
477
-
478
- return prompt_embeds
479
-
480
- def encode_prompt(
481
- self,
482
- prompt,
483
- device,
484
- num_images_per_prompt,
485
- do_classifier_free_guidance,
486
- negative_prompt=None,
487
- prompt_embeds: Optional[torch.Tensor] = None,
488
- negative_prompt_embeds: Optional[torch.Tensor] = None,
489
- lora_scale: Optional[float] = None,
490
- clip_skip: Optional[int] = None,
491
- ):
492
- r"""
493
- Encodes the prompt into text encoder hidden states.
494
-
495
- Args:
496
- prompt (`str` or `List[str]`, *optional*):
497
- prompt to be encoded
498
- device (`torch.device`):
499
- torch device
500
- num_images_per_prompt (`int`):
501
- number of images that should be generated per prompt
502
- do_classifier_free_guidance (`bool`):
503
- whether to use classifier free guidance or not
504
- negative_prompt (`str` or `List[str]`, *optional*):
505
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
506
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
507
- less than `1`).
508
- prompt_embeds (`torch.Tensor`, *optional*):
509
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
510
- provided, text embeddings will be generated from `prompt` input argument.
511
- negative_prompt_embeds (`torch.Tensor`, *optional*):
512
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
513
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
514
- argument.
515
- lora_scale (`float`, *optional*):
516
- A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
517
- clip_skip (`int`, *optional*):
518
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
519
- the output of the pre-final layer will be used for computing the prompt embeddings.
520
- """
521
- # set lora scale so that monkey patched LoRA
522
- # function of text encoder can correctly access it
523
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
524
- self._lora_scale = lora_scale
525
-
526
- # dynamically adjust the LoRA scale
527
- if not USE_PEFT_BACKEND:
528
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
529
- else:
530
- scale_lora_layers(self.text_encoder, lora_scale)
531
-
532
- if prompt is not None and isinstance(prompt, str):
533
- batch_size = 1
534
- elif prompt is not None and isinstance(prompt, list):
535
- batch_size = len(prompt)
536
- else:
537
- batch_size = prompt_embeds.shape[0]
538
-
539
- if prompt_embeds is None:
540
- # textual inversion: process multi-vector tokens if necessary
541
- if isinstance(self, TextualInversionLoaderMixin):
542
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
543
-
544
- text_inputs = self.tokenizer(
545
- prompt,
546
- padding="max_length",
547
- max_length=self.tokenizer.model_max_length,
548
- truncation=True,
549
- return_tensors="pt",
550
- )
551
- text_input_ids = text_inputs.input_ids
552
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
553
-
554
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
555
- text_input_ids, untruncated_ids
556
- ):
557
- removed_text = self.tokenizer.batch_decode(
558
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
559
- )
560
- logger.warning(
561
- "The following part of your input was truncated because CLIP can only handle sequences up to"
562
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
563
- )
564
-
565
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
566
- attention_mask = text_inputs.attention_mask.to(device)
567
- else:
568
- attention_mask = None
569
-
570
- if clip_skip is None:
571
- prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
572
- prompt_embeds = prompt_embeds[0]
573
- else:
574
- prompt_embeds = self.text_encoder(
575
- text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
576
- )
577
- # Access the `hidden_states` first, that contains a tuple of
578
- # all the hidden states from the encoder layers. Then index into
579
- # the tuple to access the hidden states from the desired layer.
580
- prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
581
- # We also need to apply the final LayerNorm here to not mess with the
582
- # representations. The `last_hidden_states` that we typically use for
583
- # obtaining the final prompt representations passes through the LayerNorm
584
- # layer.
585
- prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
586
-
587
- if self.text_encoder is not None:
588
- prompt_embeds_dtype = self.text_encoder.dtype
589
- elif self.unet is not None:
590
- prompt_embeds_dtype = self.unet.dtype
591
- else:
592
- prompt_embeds_dtype = prompt_embeds.dtype
593
-
594
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
595
-
596
- bs_embed, seq_len, _ = prompt_embeds.shape
597
- # duplicate text embeddings for each generation per prompt, using mps friendly method
598
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
599
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
600
-
601
- # get unconditional embeddings for classifier free guidance
602
- if do_classifier_free_guidance and negative_prompt_embeds is None:
603
- uncond_tokens: List[str]
604
- if negative_prompt is None:
605
- uncond_tokens = [""] * batch_size
606
- elif prompt is not None and type(prompt) is not type(negative_prompt):
607
- raise TypeError(
608
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
609
- f" {type(prompt)}."
610
- )
611
- elif isinstance(negative_prompt, str):
612
- uncond_tokens = [negative_prompt]
613
- elif batch_size != len(negative_prompt):
614
- raise ValueError(
615
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
616
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
617
- " the batch size of `prompt`."
618
- )
619
- else:
620
- uncond_tokens = negative_prompt
621
-
622
- # textual inversion: process multi-vector tokens if necessary
623
- if isinstance(self, TextualInversionLoaderMixin):
624
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
625
-
626
- max_length = prompt_embeds.shape[1]
627
- uncond_input = self.tokenizer(
628
- uncond_tokens,
629
- padding="max_length",
630
- max_length=max_length,
631
- truncation=True,
632
- return_tensors="pt",
633
- )
634
-
635
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
636
- attention_mask = uncond_input.attention_mask.to(device)
637
- else:
638
- attention_mask = None
639
-
640
- negative_prompt_embeds = self.text_encoder(
641
- uncond_input.input_ids.to(device),
642
- attention_mask=attention_mask,
643
- )
644
- negative_prompt_embeds = negative_prompt_embeds[0]
645
-
646
- if do_classifier_free_guidance:
647
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
648
- seq_len = negative_prompt_embeds.shape[1]
649
-
650
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
651
-
652
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
653
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
654
-
655
- if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
656
- # Retrieve the original scale by scaling back the LoRA layers
657
- unscale_lora_layers(self.text_encoder, lora_scale)
658
-
659
- return prompt_embeds, negative_prompt_embeds
660
-
661
- def run_safety_checker(self, image, device, dtype):
662
- if self.safety_checker is None:
663
- has_nsfw_concept = None
664
- else:
665
- if torch.is_tensor(image):
666
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
667
- else:
668
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
669
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
670
- image, has_nsfw_concept = self.safety_checker(
671
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
672
- )
673
- return image, has_nsfw_concept
674
-
675
- def decode_latents(self, latents):
676
- deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
677
- deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
678
-
679
- latents = 1 / self.vae.config.scaling_factor * latents
680
- image = self.vae.decode(latents, return_dict=False)[0]
681
- image = (image / 2 + 0.5).clamp(0, 1)
682
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
683
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
684
- return image
685
-
686
- def prepare_extra_step_kwargs(self, generator, eta):
687
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
688
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
689
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
690
- # and should be between [0, 1]
691
-
692
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
693
- extra_step_kwargs = {}
694
- if accepts_eta:
695
- extra_step_kwargs["eta"] = eta
696
-
697
- # check if the scheduler accepts generator
698
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
699
- if accepts_generator:
700
- extra_step_kwargs["generator"] = generator
701
- return extra_step_kwargs
702
-
703
- def check_inputs(
704
- self,
705
- prompt,
706
- height,
707
- width,
708
- callback_steps,
709
- negative_prompt=None,
710
- prompt_embeds=None,
711
- negative_prompt_embeds=None,
712
- callback_on_step_end_tensor_inputs=None,
713
- ):
714
- if height % 8 != 0 or width % 8 != 0:
715
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
716
-
717
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
718
- raise ValueError(
719
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
720
- f" {type(callback_steps)}."
721
- )
722
- if callback_on_step_end_tensor_inputs is not None and not all(
723
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
724
- ):
725
- raise ValueError(
726
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
727
- )
728
-
729
- if prompt is not None and prompt_embeds is not None:
730
- raise ValueError(
731
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
732
- " only forward one of the two."
733
- )
734
- elif prompt is None and prompt_embeds is None:
735
- raise ValueError(
736
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
737
- )
738
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
739
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
740
-
741
- if negative_prompt is not None and negative_prompt_embeds is not None:
742
- raise ValueError(
743
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
744
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
745
- )
746
-
747
- if prompt_embeds is not None and negative_prompt_embeds is not None:
748
- if prompt_embeds.shape != negative_prompt_embeds.shape:
749
- raise ValueError(
750
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
751
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
752
- f" {negative_prompt_embeds.shape}."
753
- )
754
-
755
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
756
- shape = (
757
- batch_size,
758
- num_channels_latents,
759
- int(height) // self.vae_scale_factor,
760
- int(width) // self.vae_scale_factor,
761
- )
762
- if isinstance(generator, list) and len(generator) != batch_size:
763
- raise ValueError(
764
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
765
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
766
- )
767
-
768
- if latents is None:
769
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
770
- else:
771
- latents = latents.to(device)
772
-
773
- # scale the initial noise by the standard deviation required by the scheduler
774
- latents = latents * self.scheduler.init_noise_sigma
775
- return latents
776
-
777
- # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
778
- def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
779
- """
780
- See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
781
-
782
- Args:
783
- timesteps (`torch.Tensor`):
784
- generate embedding vectors at these timesteps
785
- embedding_dim (`int`, *optional*, defaults to 512):
786
- dimension of the embeddings to generate
787
- dtype:
788
- data type of the generated embeddings
789
-
790
- Returns:
791
- `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
792
- """
793
- assert len(w.shape) == 1
794
- w = w * 1000.0
795
-
796
- half_dim = embedding_dim // 2
797
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
798
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
799
- emb = w.to(dtype)[:, None] * emb[None, :]
800
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
801
- if embedding_dim % 2 == 1: # zero pad
802
- emb = torch.nn.functional.pad(emb, (0, 1))
803
- assert emb.shape == (w.shape[0], embedding_dim)
804
- return emb
805
-
806
- @property
807
- def guidance_scale(self):
808
- return self._guidance_scale
809
-
810
- @property
811
- def guidance_rescale(self):
812
- return self._guidance_rescale
813
-
814
- @property
815
- def clip_skip(self):
816
- return self._clip_skip
817
-
818
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
819
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
820
- # corresponds to doing no classifier free guidance.
821
- @property
822
- def do_classifier_free_guidance(self):
823
- return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
824
-
825
- @property
826
- def cross_attention_kwargs(self):
827
- return self._cross_attention_kwargs
828
-
829
- @property
830
- def num_timesteps(self):
831
- return self._num_timesteps
832
-
833
- @property
834
- def interrupt(self):
835
- return self._interrupt
836
-
837
- @torch.no_grad()
838
- def __call__(
839
- self,
840
- prompt: Union[str, List[str]] = None,
841
- height: Optional[int] = None,
842
- width: Optional[int] = None,
843
- num_inference_steps: int = 50,
844
- timesteps: List[int] = None,
845
- guidance_scale: float = 7.5,
846
- negative_prompt: Optional[Union[str, List[str]]] = None,
847
- num_images_per_prompt: Optional[int] = 1,
848
- eta: float = 0.0,
849
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
850
- latents: Optional[torch.Tensor] = None,
851
- prompt_embeds: Optional[torch.Tensor] = None,
852
- negative_prompt_embeds: Optional[torch.Tensor] = None,
853
- image_embeds: Optional[torch.Tensor] = None,
854
- output_type: Optional[str] = "pil",
855
- return_dict: bool = True,
856
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
857
- guidance_rescale: float = 0.0,
858
- clip_skip: Optional[int] = None,
859
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
860
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
861
- **kwargs,
862
- ):
863
- r"""
864
- The call function to the pipeline for generation.
865
-
866
- Args:
867
- prompt (`str` or `List[str]`, *optional*):
868
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
869
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
870
- The height in pixels of the generated image.
871
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
872
- The width in pixels of the generated image.
873
- num_inference_steps (`int`, *optional*, defaults to 50):
874
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
875
- expense of slower inference.
876
- timesteps (`List[int]`, *optional*):
877
- Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
878
- in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
879
- passed will be used. Must be in descending order.
880
- guidance_scale (`float`, *optional*, defaults to 7.5):
881
- A higher guidance scale value encourages the model to generate images closely linked to the text
882
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
883
- negative_prompt (`str` or `List[str]`, *optional*):
884
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
885
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
886
- num_images_per_prompt (`int`, *optional*, defaults to 1):
887
- The number of images to generate per prompt.
888
- eta (`float`, *optional*, defaults to 0.0):
889
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
890
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
891
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
892
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
893
- generation deterministic.
894
- latents (`torch.Tensor`, *optional*):
895
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
896
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
897
- tensor is generated by sampling using the supplied random `generator`.
898
- prompt_embeds (`torch.Tensor`, *optional*):
899
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
900
- provided, text embeddings are generated from the `prompt` input argument.
901
- negative_prompt_embeds (`torch.Tensor`, *optional*):
902
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
903
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
904
- image_embeds (`torch.Tensor`, *optional*):
905
- Pre-generated image embeddings.
906
- output_type (`str`, *optional*, defaults to `"pil"`):
907
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
908
- return_dict (`bool`, *optional*, defaults to `True`):
909
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
910
- plain tuple.
911
- cross_attention_kwargs (`dict`, *optional*):
912
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
913
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
914
- guidance_rescale (`float`, *optional*, defaults to 0.0):
915
- Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
916
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
917
- using zero terminal SNR.
918
- clip_skip (`int`, *optional*):
919
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
920
- the output of the pre-final layer will be used for computing the prompt embeddings.
921
- callback_on_step_end (`Callable`, *optional*):
922
- A function that is called at the end of each denoising step during inference. The function is called
923
- with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
924
- callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
925
- `callback_on_step_end_tensor_inputs`.
926
- callback_on_step_end_tensor_inputs (`List`, *optional*):
927
- The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
928
- will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
929
- `._callback_tensor_inputs` attribute of your pipeline class.
930
-
931
- Examples:
932
-
933
- Returns:
934
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
935
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
936
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
937
- second element is a list of `bool`s indicating whether the corresponding generated image contains
938
- "not-safe-for-work" (nsfw) content.
939
- """
940
-
941
- callback = kwargs.pop("callback", None)
942
- callback_steps = kwargs.pop("callback_steps", None)
943
-
944
- if callback is not None:
945
- deprecate(
946
- "callback",
947
- "1.0.0",
948
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
949
- )
950
- if callback_steps is not None:
951
- deprecate(
952
- "callback_steps",
953
- "1.0.0",
954
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
955
- )
956
-
957
- # 0. Default height and width to unet
958
- height = height or self.unet.config.sample_size * self.vae_scale_factor
959
- width = width or self.unet.config.sample_size * self.vae_scale_factor
960
- # to deal with lora scaling and other possible forward hooks
961
-
962
- # 1. Check inputs. Raise error if not correct
963
- self.check_inputs(
964
- prompt,
965
- height,
966
- width,
967
- callback_steps,
968
- negative_prompt,
969
- prompt_embeds,
970
- negative_prompt_embeds,
971
- callback_on_step_end_tensor_inputs,
972
- )
973
-
974
- self._guidance_scale = guidance_scale
975
- self._guidance_rescale = guidance_rescale
976
- self._clip_skip = clip_skip
977
- self._cross_attention_kwargs = cross_attention_kwargs
978
- self._interrupt = False
979
-
980
- # 2. Define call parameters
981
- if prompt is not None and isinstance(prompt, str):
982
- batch_size = 1
983
- elif prompt is not None and isinstance(prompt, list):
984
- batch_size = len(prompt)
985
- else:
986
- batch_size = prompt_embeds.shape[0]
987
-
988
- device = self._execution_device
989
-
990
- # 3. Encode input prompt
991
- lora_scale = (
992
- self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
993
- )
994
-
995
- prompt_embeds, negative_prompt_embeds = self.encode_prompt(
996
- prompt,
997
- device,
998
- num_images_per_prompt,
999
- self.do_classifier_free_guidance,
1000
- negative_prompt,
1001
- prompt_embeds=prompt_embeds,
1002
- negative_prompt_embeds=negative_prompt_embeds,
1003
- lora_scale=lora_scale,
1004
- clip_skip=self.clip_skip,
1005
- )
1006
-
1007
- # For classifier free guidance, we need to do two forward passes.
1008
- # Here we concatenate the unconditional and text embeddings into a single batch
1009
- # to avoid doing two forward passes
1010
- if self.do_classifier_free_guidance:
1011
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
1012
-
1013
- if image_embeds is not None:
1014
- image_embeds = torch.stack([image_embeds] * num_images_per_prompt, dim=0).to(
1015
- device=device, dtype=prompt_embeds.dtype
1016
- )
1017
- negative_image_embeds = torch.zeros_like(image_embeds)
1018
- if self.do_classifier_free_guidance:
1019
- image_embeds = torch.cat([negative_image_embeds, image_embeds])
1020
- image_embeds = [image_embeds]
1021
- # 4. Prepare timesteps
1022
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
1023
-
1024
- # 5. Prepare latent variables
1025
- num_channels_latents = self.unet.config.in_channels
1026
- latents = self.prepare_latents(
1027
- batch_size * num_images_per_prompt,
1028
- num_channels_latents,
1029
- height,
1030
- width,
1031
- prompt_embeds.dtype,
1032
- device,
1033
- generator,
1034
- latents,
1035
- )
1036
-
1037
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1038
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1039
-
1040
- # 6.1 Add image embeds for IP-Adapter
1041
- added_cond_kwargs = {"image_embeds": image_embeds} if image_embeds is not None else {}
1042
-
1043
- # 6.2 Optionally get Guidance Scale Embedding
1044
- timestep_cond = None
1045
- if self.unet.config.time_cond_proj_dim is not None:
1046
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1047
- timestep_cond = self.get_guidance_scale_embedding(
1048
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1049
- ).to(device=device, dtype=latents.dtype)
1050
-
1051
- # 7. Denoising loop
1052
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1053
- self._num_timesteps = len(timesteps)
1054
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1055
- for i, t in enumerate(timesteps):
1056
- if self.interrupt:
1057
- continue
1058
-
1059
- # expand the latents if we are doing classifier free guidance
1060
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1061
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1062
-
1063
- # predict the noise residual
1064
- noise_pred = self.unet(
1065
- latent_model_input,
1066
- t,
1067
- encoder_hidden_states=prompt_embeds,
1068
- timestep_cond=timestep_cond,
1069
- cross_attention_kwargs=self.cross_attention_kwargs,
1070
- added_cond_kwargs=added_cond_kwargs,
1071
- return_dict=False,
1072
- )[0]
1073
-
1074
- # perform guidance
1075
- if self.do_classifier_free_guidance:
1076
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1077
- noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1078
-
1079
- if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
1080
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1081
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
1082
-
1083
- # compute the previous noisy sample x_t -> x_t-1
1084
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1085
-
1086
- if callback_on_step_end is not None:
1087
- callback_kwargs = {}
1088
- for k in callback_on_step_end_tensor_inputs:
1089
- callback_kwargs[k] = locals()[k]
1090
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1091
-
1092
- latents = callback_outputs.pop("latents", latents)
1093
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1094
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1095
-
1096
- # call the callback, if provided
1097
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1098
- progress_bar.update()
1099
- if callback is not None and i % callback_steps == 0:
1100
- step_idx = i // getattr(self.scheduler, "order", 1)
1101
- callback(step_idx, t, latents)
1102
-
1103
- if not output_type == "latent":
1104
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1105
- 0
1106
- ]
1107
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1108
- else:
1109
- image = latents
1110
- has_nsfw_concept = None
1111
-
1112
- if has_nsfw_concept is None:
1113
- do_denormalize = [True] * image.shape[0]
1114
- else:
1115
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1116
-
1117
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1118
-
1119
- # Offload all models
1120
- self.maybe_free_model_hooks()
1121
-
1122
- if not return_dict:
1123
- return (image, has_nsfw_concept)
1124
-
1125
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
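
The listing above ends the diff for ip_adapter_face_id.py. For reference, here is a minimal, hedged sketch of how a pipeline with the `__call__` signature shown above would be driven; it is an illustration only, not part of this repo. The model id, the embedding shape, and the sampling settings are placeholders, the FaceID adapter weights would additionally have to be loaded by the loader defined in the part of the deleted file not shown here, and `custom_pipeline="ip_adapter_face_id"` resolves only against a diffusers revision that still ships the file.

```py
import torch
from diffusers import DiffusionPipeline

# Load the community pipeline from a revision that still includes ip_adapter_face_id.py.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="ip_adapter_face_id",
    torch_dtype=torch.float16,
).to("cuda")

# `__call__` accepts pre-computed face embeddings via `image_embeds`.
# A real embedding would come from a face-recognition model (e.g. InsightFace);
# this random tensor is only a stand-in to show the expected call.
face_embeds = torch.randn(1, 512, dtype=torch.float16)

image = pipe(
    "portrait photo of a person, studio lighting",
    image_embeds=face_embeds,
    num_inference_steps=30,
    guidance_scale=7.5,
).images[0]
```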
kohya_hires_fix.py DELETED
@@ -1,468 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from typing import Any, Dict, List, Optional, Tuple, Union
16
-
17
- import torch
18
- import torch.nn as nn
19
- import torch.utils.checkpoint
20
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
21
-
22
- from diffusers.configuration_utils import register_to_config
23
- from diffusers.image_processor import VaeImageProcessor
24
- from diffusers.models.autoencoders import AutoencoderKL
25
- from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel, UNet2DConditionOutput
26
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline
27
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
28
- from diffusers.schedulers import KarrasDiffusionSchedulers
29
- from diffusers.utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
30
-
31
-
32
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
33
-
34
-
35
- class UNet2DConditionModelHighResFix(UNet2DConditionModel):
36
- r"""
37
- A conditional 2D UNet model that applies Kohya fix proposed for high resolution image generation.
38
-
39
- This model inherits from [`UNet2DConditionModel`]. Check the superclass documentation for learning about all the parameters.
40
-
41
- Parameters:
42
- high_res_fix (`List[Dict]`, *optional*, defaults to `[{'timestep': 600, 'scale_factor': 0.5, 'block_num': 1}]`):
43
- Enables the Kohya fix for high-resolution generation. While the current timestep is greater than `timestep`, the activation maps of the down block at `block_num` are rescaled by `scale_factor`.
44
- """
45
-
46
- _supports_gradient_checkpointing = True
47
-
48
- @register_to_config
49
- def __init__(self, high_res_fix: List[Dict] = [{"timestep": 600, "scale_factor": 0.5, "block_num": 1}], **kwargs):
50
- super().__init__(**kwargs)
51
- if high_res_fix:
52
- self.config.high_res_fix = sorted(high_res_fix, key=lambda x: x["timestep"], reverse=True)
53
-
54
- @classmethod
55
- def _resize(cls, sample, target=None, scale_factor=1, mode="bicubic"):
56
- dtype = sample.dtype
57
- if dtype == torch.bfloat16:
58
- sample = sample.to(torch.float32)
59
-
60
- if target is not None:
61
- if sample.shape[-2:] != target.shape[-2:]:
62
- sample = nn.functional.interpolate(sample, size=target.shape[-2:], mode=mode, align_corners=False)
63
- elif scale_factor != 1:
64
- sample = nn.functional.interpolate(sample, scale_factor=scale_factor, mode=mode, align_corners=False)
65
-
66
- return sample.to(dtype)
67
-
68
- def forward(
69
- self,
70
- sample: torch.FloatTensor,
71
- timestep: Union[torch.Tensor, float, int],
72
- encoder_hidden_states: torch.Tensor,
73
- class_labels: Optional[torch.Tensor] = None,
74
- timestep_cond: Optional[torch.Tensor] = None,
75
- attention_mask: Optional[torch.Tensor] = None,
76
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
77
- added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
78
- down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
79
- mid_block_additional_residual: Optional[torch.Tensor] = None,
80
- down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
81
- encoder_attention_mask: Optional[torch.Tensor] = None,
82
- return_dict: bool = True,
83
- ) -> Union[UNet2DConditionOutput, Tuple]:
84
- r"""
85
- The [`UNet2DConditionModel`] forward method.
86
-
87
- Args:
88
- sample (`torch.FloatTensor`):
89
- The noisy input tensor with the following shape `(batch, channel, height, width)`.
90
- timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
91
- encoder_hidden_states (`torch.FloatTensor`):
92
- The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
93
- class_labels (`torch.Tensor`, *optional*, defaults to `None`):
94
- Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
95
- timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
96
- Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
97
- through the `self.time_embedding` layer to obtain the timestep embeddings.
98
- attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
99
- An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
100
- is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
101
- negative values to the attention scores corresponding to "discard" tokens.
102
- cross_attention_kwargs (`dict`, *optional*):
103
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
104
- `self.processor` in
105
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
106
- added_cond_kwargs: (`dict`, *optional*):
107
- A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
108
- are passed along to the UNet blocks.
109
- down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
110
- A tuple of tensors that if specified are added to the residuals of down unet blocks.
111
- mid_block_additional_residual: (`torch.Tensor`, *optional*):
112
- A tensor that if specified is added to the residual of the middle unet block.
113
- down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
114
- additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
115
- encoder_attention_mask (`torch.Tensor`):
116
- A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
117
- `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
118
- which adds large negative values to the attention scores corresponding to "discard" tokens.
119
- return_dict (`bool`, *optional*, defaults to `True`):
120
- Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
121
- tuple.
122
-
123
- Returns:
124
- [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
125
- If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
126
- otherwise a `tuple` is returned where the first element is the sample tensor.
127
- """
128
- # By default samples have to be at least a multiple of the overall upsampling factor.
129
- # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
130
- # However, the upsampling interpolation output size can be forced to fit any upsampling size
131
- # on the fly if necessary.
132
- default_overall_up_factor = 2**self.num_upsamplers
133
-
134
- # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
135
- forward_upsample_size = False
136
- upsample_size = None
137
-
138
- for dim in sample.shape[-2:]:
139
- if dim % default_overall_up_factor != 0:
140
- # Forward upsample size to force interpolation output size.
141
- forward_upsample_size = True
142
- break
143
-
144
- # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
145
- # expects mask of shape:
146
- # [batch, key_tokens]
147
- # adds singleton query_tokens dimension:
148
- # [batch, 1, key_tokens]
149
- # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
150
- # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
151
- # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
152
- if attention_mask is not None:
153
- # assume that mask is expressed as:
154
- # (1 = keep, 0 = discard)
155
- # convert mask into a bias that can be added to attention scores:
156
- # (keep = +0, discard = -10000.0)
157
- attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
158
- attention_mask = attention_mask.unsqueeze(1)
159
-
160
- # convert encoder_attention_mask to a bias the same way we do for attention_mask
161
- if encoder_attention_mask is not None:
162
- encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
163
- encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
164
-
165
- # 0. center input if necessary
166
- if self.config.center_input_sample:
167
- sample = 2 * sample - 1.0
168
-
169
- # 1. time
170
- t_emb = self.get_time_embed(sample=sample, timestep=timestep)
171
- emb = self.time_embedding(t_emb, timestep_cond)
172
- aug_emb = None
173
-
174
- class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
175
- if class_emb is not None:
176
- if self.config.class_embeddings_concat:
177
- emb = torch.cat([emb, class_emb], dim=-1)
178
- else:
179
- emb = emb + class_emb
180
-
181
- aug_emb = self.get_aug_embed(
182
- emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
183
- )
184
- if self.config.addition_embed_type == "image_hint":
185
- aug_emb, hint = aug_emb
186
- sample = torch.cat([sample, hint], dim=1)
187
-
188
- emb = emb + aug_emb if aug_emb is not None else emb
189
-
190
- if self.time_embed_act is not None:
191
- emb = self.time_embed_act(emb)
192
-
193
- encoder_hidden_states = self.process_encoder_hidden_states(
194
- encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
195
- )
196
-
197
- # 2. pre-process
198
- sample = self.conv_in(sample)
199
-
200
- # 2.5 GLIGEN position net
201
- if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
202
- cross_attention_kwargs = cross_attention_kwargs.copy()
203
- gligen_args = cross_attention_kwargs.pop("gligen")
204
- cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
205
-
206
- # 3. down
207
- # we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
208
- # to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
209
- if cross_attention_kwargs is not None:
210
- cross_attention_kwargs = cross_attention_kwargs.copy()
211
- lora_scale = cross_attention_kwargs.pop("scale", 1.0)
212
- else:
213
- lora_scale = 1.0
214
-
215
- if USE_PEFT_BACKEND:
216
- # weight the lora layers by setting `lora_scale` for each PEFT layer
217
- scale_lora_layers(self, lora_scale)
218
-
219
- is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
220
- # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
221
- is_adapter = down_intrablock_additional_residuals is not None
222
- # maintain backward compatibility for legacy usage, where
223
- # T2I-Adapter and ControlNet both use down_block_additional_residuals arg
224
- # but can only use one or the other
225
- if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
226
- deprecate(
227
- "T2I should not use down_block_additional_residuals",
228
- "1.3.0",
229
- "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
230
- and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
231
- for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. ",
232
- standard_warn=False,
233
- )
234
- down_intrablock_additional_residuals = down_block_additional_residuals
235
- is_adapter = True
236
-
237
- down_block_res_samples = (sample,)
238
- for down_i, downsample_block in enumerate(self.down_blocks):
239
- if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
240
- # For t2i-adapter CrossAttnDownBlock2D
241
- additional_residuals = {}
242
- if is_adapter and len(down_intrablock_additional_residuals) > 0:
243
- additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
244
-
245
- sample, res_samples = downsample_block(
246
- hidden_states=sample,
247
- temb=emb,
248
- encoder_hidden_states=encoder_hidden_states,
249
- attention_mask=attention_mask,
250
- cross_attention_kwargs=cross_attention_kwargs,
251
- encoder_attention_mask=encoder_attention_mask,
252
- **additional_residuals,
253
- )
254
-
255
- else:
256
- sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
257
- if is_adapter and len(down_intrablock_additional_residuals) > 0:
258
- sample += down_intrablock_additional_residuals.pop(0)
259
-
260
- down_block_res_samples += res_samples
261
-
262
- # kohya high res fix
263
- if self.config.high_res_fix:
264
- for high_res_fix in self.config.high_res_fix:
265
- if timestep > high_res_fix["timestep"] and down_i == high_res_fix["block_num"]:
266
- sample = self.__class__._resize(sample, scale_factor=high_res_fix["scale_factor"])
267
- break
268
-
269
- if is_controlnet:
270
- new_down_block_res_samples = ()
271
-
272
- for down_block_res_sample, down_block_additional_residual in zip(
273
- down_block_res_samples, down_block_additional_residuals
274
- ):
275
- down_block_res_sample = down_block_res_sample + down_block_additional_residual
276
- new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
277
-
278
- down_block_res_samples = new_down_block_res_samples
279
-
280
- # 4. mid
281
- if self.mid_block is not None:
282
- if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
283
- sample = self.mid_block(
284
- sample,
285
- emb,
286
- encoder_hidden_states=encoder_hidden_states,
287
- attention_mask=attention_mask,
288
- cross_attention_kwargs=cross_attention_kwargs,
289
- encoder_attention_mask=encoder_attention_mask,
290
- )
291
- else:
292
- sample = self.mid_block(sample, emb)
293
-
294
- # To support T2I-Adapter-XL
295
- if (
296
- is_adapter
297
- and len(down_intrablock_additional_residuals) > 0
298
- and sample.shape == down_intrablock_additional_residuals[0].shape
299
- ):
300
- sample += down_intrablock_additional_residuals.pop(0)
301
-
302
- if is_controlnet:
303
- sample = sample + mid_block_additional_residual
304
-
305
- # 5. up
306
- for i, upsample_block in enumerate(self.up_blocks):
307
- is_final_block = i == len(self.up_blocks) - 1
308
-
309
- res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
310
- down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
311
-
312
- # up scaling of kohya high res fix
313
- if self.config.high_res_fix is not None:
314
- if res_samples[0].shape[-2:] != sample.shape[-2:]:
315
- sample = self.__class__._resize(sample, target=res_samples[0])
316
- res_samples_up_sampled = (res_samples[0],)
317
- for res_sample in res_samples[1:]:
318
- res_samples_up_sampled += (self.__class__._resize(res_sample, target=res_samples[0]),)
319
- res_samples = res_samples_up_sampled
320
-
321
- # if we have not reached the final block and need to forward the
322
- # upsample size, we do it here
323
- if not is_final_block and forward_upsample_size:
324
- upsample_size = down_block_res_samples[-1].shape[2:]
325
-
326
- if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
327
- sample = upsample_block(
328
- hidden_states=sample,
329
- temb=emb,
330
- res_hidden_states_tuple=res_samples,
331
- encoder_hidden_states=encoder_hidden_states,
332
- cross_attention_kwargs=cross_attention_kwargs,
333
- upsample_size=upsample_size,
334
- attention_mask=attention_mask,
335
- encoder_attention_mask=encoder_attention_mask,
336
- )
337
- else:
338
- sample = upsample_block(
339
- hidden_states=sample,
340
- temb=emb,
341
- res_hidden_states_tuple=res_samples,
342
- upsample_size=upsample_size,
343
- )
344
-
345
- # 6. post-process
346
- if self.conv_norm_out:
347
- sample = self.conv_norm_out(sample)
348
- sample = self.conv_act(sample)
349
- sample = self.conv_out(sample)
350
-
351
- if USE_PEFT_BACKEND:
352
- # remove `lora_scale` from each PEFT layer
353
- unscale_lora_layers(self, lora_scale)
354
-
355
- if not return_dict:
356
- return (sample,)
357
-
358
- return UNet2DConditionOutput(sample=sample)
359
-
360
- @classmethod
361
- def from_unet(cls, unet: UNet2DConditionModel, high_res_fix: list):
362
- config = dict((unet.config))
363
- config["high_res_fix"] = high_res_fix
364
- unet_high_res = cls(**config)
365
- unet_high_res.load_state_dict(unet.state_dict())
366
- unet_high_res.to(unet.dtype)
367
- return unet_high_res
368
-
369
-
370
- EXAMPLE_DOC_STRING = """
371
- Examples:
372
- ```py
373
- >>> import torch
374
- >>> from diffusers import DiffusionPipeline
375
-
376
- >>> pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",
377
- custom_pipeline="kohya_hires_fix",
378
- torch_dtype=torch.float16,
379
- high_res_fix=[{'timestep': 600,
380
- 'scale_factor': 0.5,
381
- 'block_num': 1}])
382
- >>> pipe = pipe.to("cuda")
383
-
384
- >>> prompt = "a photo of an astronaut riding a horse on mars"
385
- >>> image = pipe(prompt, height=1000, width=1600).images[0]
386
- ```
387
- """
388
-
389
-
390
- class StableDiffusionHighResFixPipeline(StableDiffusionPipeline):
391
- r"""
392
- Pipeline for text-to-image generation using Stable Diffusion with Kohya fix for high resolution generation.
393
-
394
- This model inherits from [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods.
395
-
396
- The pipeline also inherits the following loading methods:
397
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
398
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
399
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
400
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
401
- - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
402
-
403
- Args:
404
- vae ([`AutoencoderKL`]):
405
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
406
- text_encoder ([`~transformers.CLIPTextModel`]):
407
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
408
- tokenizer ([`~transformers.CLIPTokenizer`]):
409
- A `CLIPTokenizer` to tokenize text.
410
- unet ([`UNet2DConditionModel`]):
411
- A `UNet2DConditionModel` to denoise the encoded image latents.
412
- scheduler ([`SchedulerMixin`]):
413
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
414
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
415
- safety_checker ([`StableDiffusionSafetyChecker`]):
416
- Classification module that estimates whether generated images could be considered offensive or harmful.
417
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
418
- about a model's potential harms.
419
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
420
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
421
- high_res_fix (`List[Dict]`, *optional*, defaults to `[{'timestep': 600, 'scale_factor': 0.5, 'block_num': 1}]`):
422
- Enables the Kohya fix for high-resolution generation. While the current timestep is greater than `timestep`, the activation maps of the down block at `block_num` are rescaled by `scale_factor`.
423
- """
424
-
425
- model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
426
- _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
427
- _exclude_from_cpu_offload = ["safety_checker"]
428
- _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
429
-
430
- def __init__(
431
- self,
432
- vae: AutoencoderKL,
433
- text_encoder: CLIPTextModel,
434
- tokenizer: CLIPTokenizer,
435
- unet: UNet2DConditionModel,
436
- scheduler: KarrasDiffusionSchedulers,
437
- safety_checker: StableDiffusionSafetyChecker,
438
- feature_extractor: CLIPImageProcessor,
439
- image_encoder: CLIPVisionModelWithProjection = None,
440
- requires_safety_checker: bool = True,
441
- high_res_fix: List[Dict] = [{"timestep": 600, "scale_factor": 0.5, "block_num": 1}],
442
- ):
443
- super().__init__(
444
- vae=vae,
445
- text_encoder=text_encoder,
446
- tokenizer=tokenizer,
447
- unet=unet,
448
- scheduler=scheduler,
449
- safety_checker=safety_checker,
450
- feature_extractor=feature_extractor,
451
- image_encoder=image_encoder,
452
- requires_safety_checker=requires_safety_checker,
453
- )
454
-
455
- unet = UNet2DConditionModelHighResFix.from_unet(unet=unet, high_res_fix=high_res_fix)
456
- self.register_modules(
457
- vae=vae,
458
- text_encoder=text_encoder,
459
- tokenizer=tokenizer,
460
- unet=unet,
461
- scheduler=scheduler,
462
- safety_checker=safety_checker,
463
- feature_extractor=feature_extractor,
464
- image_encoder=image_encoder,
465
- )
466
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
467
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
468
- self.register_to_config(requires_safety_checker=requires_safety_checker)
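
The `from_unet` helper and the pipeline constructor above suggest a second way to use the fix besides the `custom_pipeline="kohya_hires_fix"` route shown in the example docstring: wrap the UNet of an already-instantiated pipeline. The sketch below is an illustration only and assumes the deleted class has been copied into a hypothetical local module named `kohya_hires_fix`.

```py
import torch
from diffusers import StableDiffusionPipeline

# Assumed local copy of the deleted file, importable as a module.
from kohya_hires_fix import UNet2DConditionModelHighResFix

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
)

# Wrap the plain UNet: activations of down block 1 are halved while t > 600,
# mirroring the default `high_res_fix` config in the deleted code.
pipe.unet = UNet2DConditionModelHighResFix.from_unet(
    unet=pipe.unet,
    high_res_fix=[{"timestep": 600, "scale_factor": 0.5, "block_num": 1}],
)
pipe = pipe.to("cuda")

image = pipe("a photo of an astronaut riding a horse on mars", height=1000, width=1600).images[0]
```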
latent_consistency_img2img.py DELETED
@@ -1,821 +0,0 @@
1
- # Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
- # and https://github.com/hojonathanho/diffusion
17
-
18
- import math
19
- from dataclasses import dataclass
20
- from typing import Any, Dict, List, Optional, Tuple, Union
21
-
22
- import numpy as np
23
- import PIL.Image
24
- import torch
25
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
26
-
27
- from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
28
- from diffusers.configuration_utils import register_to_config
29
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
30
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
31
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
32
- from diffusers.utils import BaseOutput
33
- from diffusers.utils.torch_utils import randn_tensor
34
-
35
-
36
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
37
-
38
-
39
- class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline):
40
- _optional_components = ["scheduler"]
41
-
42
- def __init__(
43
- self,
44
- vae: AutoencoderKL,
45
- text_encoder: CLIPTextModel,
46
- tokenizer: CLIPTokenizer,
47
- unet: UNet2DConditionModel,
48
- scheduler: "LCMSchedulerWithTimestamp",
49
- safety_checker: StableDiffusionSafetyChecker,
50
- feature_extractor: CLIPImageProcessor,
51
- requires_safety_checker: bool = True,
52
- ):
53
- super().__init__()
54
-
55
- scheduler = (
56
- scheduler
57
- if scheduler is not None
58
- else LCMSchedulerWithTimestamp(
59
- beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
60
- )
61
- )
62
-
63
- self.register_modules(
64
- vae=vae,
65
- text_encoder=text_encoder,
66
- tokenizer=tokenizer,
67
- unet=unet,
68
- scheduler=scheduler,
69
- safety_checker=safety_checker,
70
- feature_extractor=feature_extractor,
71
- )
72
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
73
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
74
-
75
- def _encode_prompt(
76
- self,
77
- prompt,
78
- device,
79
- num_images_per_prompt,
80
- prompt_embeds: Optional[torch.Tensor] = None,
81
- ):
82
- r"""
83
- Encodes the prompt into text encoder hidden states.
84
- Args:
85
- prompt (`str` or `List[str]`, *optional*):
86
- prompt to be encoded
87
- device: (`torch.device`):
88
- torch device
89
- num_images_per_prompt (`int`):
90
- number of images that should be generated per prompt
91
- prompt_embeds (`torch.Tensor`, *optional*):
92
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
93
- provided, text embeddings will be generated from `prompt` input argument.
94
- """
95
-
96
- if prompt is not None and isinstance(prompt, str):
97
- pass
98
- elif prompt is not None and isinstance(prompt, list):
99
- len(prompt)
100
- else:
101
- prompt_embeds.shape[0]
102
-
103
- if prompt_embeds is None:
104
- text_inputs = self.tokenizer(
105
- prompt,
106
- padding="max_length",
107
- max_length=self.tokenizer.model_max_length,
108
- truncation=True,
109
- return_tensors="pt",
110
- )
111
- text_input_ids = text_inputs.input_ids
112
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
113
-
114
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
115
- text_input_ids, untruncated_ids
116
- ):
117
- removed_text = self.tokenizer.batch_decode(
118
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
119
- )
120
- logger.warning(
121
- "The following part of your input was truncated because CLIP can only handle sequences up to"
122
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
123
- )
124
-
125
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
126
- attention_mask = text_inputs.attention_mask.to(device)
127
- else:
128
- attention_mask = None
129
-
130
- prompt_embeds = self.text_encoder(
131
- text_input_ids.to(device),
132
- attention_mask=attention_mask,
133
- )
134
- prompt_embeds = prompt_embeds[0]
135
-
136
- if self.text_encoder is not None:
137
- prompt_embeds_dtype = self.text_encoder.dtype
138
- elif self.unet is not None:
139
- prompt_embeds_dtype = self.unet.dtype
140
- else:
141
- prompt_embeds_dtype = prompt_embeds.dtype
142
-
143
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
144
-
145
- bs_embed, seq_len, _ = prompt_embeds.shape
146
- # duplicate text embeddings for each generation per prompt, using mps friendly method
147
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
148
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
149
-
150
- # Don't need to get uncond prompt embedding because of LCM Guided Distillation
151
- return prompt_embeds
152
-
153
- def run_safety_checker(self, image, device, dtype):
154
- if self.safety_checker is None:
155
- has_nsfw_concept = None
156
- else:
157
- if torch.is_tensor(image):
158
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
159
- else:
160
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
161
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
162
- image, has_nsfw_concept = self.safety_checker(
163
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
164
- )
165
- return image, has_nsfw_concept
166
-
167
- def prepare_latents(
168
- self,
169
- image,
170
- timestep,
171
- batch_size,
172
- num_channels_latents,
173
- height,
174
- width,
175
- dtype,
176
- device,
177
- latents=None,
178
- generator=None,
179
- ):
180
- shape = (
181
- batch_size,
182
- num_channels_latents,
183
- int(height) // self.vae_scale_factor,
184
- int(width) // self.vae_scale_factor,
185
- )
186
-
187
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
188
- raise ValueError(
189
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
190
- )
191
-
192
- image = image.to(device=device, dtype=dtype)
193
-
194
- # batch_size = batch_size * num_images_per_prompt
195
-
196
- if image.shape[1] == 4:
197
- init_latents = image
198
-
199
- else:
200
- if isinstance(generator, list) and len(generator) != batch_size:
201
- raise ValueError(
202
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
203
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
204
- )
205
-
206
- elif isinstance(generator, list):
207
- init_latents = [
208
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
209
- ]
210
- init_latents = torch.cat(init_latents, dim=0)
211
- else:
212
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
213
-
214
- init_latents = self.vae.config.scaling_factor * init_latents
215
-
216
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
217
- # expand init_latents for batch_size
218
- deprecation_message = (
219
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
220
- " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
221
- " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
222
- " your script to pass as many initial images as text prompts to suppress this warning."
223
- )
224
- logger.warning(deprecation_message)
225
- additional_image_per_prompt = batch_size // init_latents.shape[0]
226
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
227
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
228
- raise ValueError(
229
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
230
- )
231
- else:
232
- init_latents = torch.cat([init_latents], dim=0)
233
-
234
- shape = init_latents.shape
235
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
236
-
237
- # get latents
238
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
239
- latents = init_latents
240
-
241
- return latents
242
-
243
- def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
244
- """
245
- see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
246
- Args:
247
- w: torch.Tensor: guidance scale values (one per sample in the batch) to embed
248
- embedding_dim: int: dimension of the embeddings to generate
249
- dtype: data type of the generated embeddings
250
- Returns:
251
- embedding vectors with shape `(len(w), embedding_dim)`
252
- """
253
- assert len(w.shape) == 1
254
- w = w * 1000.0
255
-
256
- half_dim = embedding_dim // 2
257
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
258
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
259
- emb = w.to(dtype)[:, None] * emb[None, :]
260
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
261
- if embedding_dim % 2 == 1: # zero pad
262
- emb = torch.nn.functional.pad(emb, (0, 1))
263
- assert emb.shape == (w.shape[0], embedding_dim)
264
- return emb
265
-
266
- def get_timesteps(self, num_inference_steps, strength, device):
267
- # get the original timestep using init_timestep
268
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
269
-
270
- t_start = max(num_inference_steps - init_timestep, 0)
271
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
272
-
273
- return timesteps, num_inference_steps - t_start
274
-
275
- @torch.no_grad()
276
- def __call__(
277
- self,
278
- prompt: Union[str, List[str]] = None,
279
- image: PipelineImageInput = None,
280
- strength: float = 0.8,
281
- height: Optional[int] = 768,
282
- width: Optional[int] = 768,
283
- guidance_scale: float = 7.5,
284
- num_images_per_prompt: Optional[int] = 1,
285
- latents: Optional[torch.Tensor] = None,
286
- num_inference_steps: int = 4,
287
- lcm_origin_steps: int = 50,
288
- prompt_embeds: Optional[torch.Tensor] = None,
289
- output_type: Optional[str] = "pil",
290
- return_dict: bool = True,
291
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
292
- ):
293
- # 0. Default height and width to unet
294
- height = height or self.unet.config.sample_size * self.vae_scale_factor
295
- width = width or self.unet.config.sample_size * self.vae_scale_factor
296
-
297
- # 2. Define call parameters
298
- if prompt is not None and isinstance(prompt, str):
299
- batch_size = 1
300
- elif prompt is not None and isinstance(prompt, list):
301
- batch_size = len(prompt)
302
- else:
303
- batch_size = prompt_embeds.shape[0]
304
-
305
- device = self._execution_device
306
- # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
307
-
308
- # 3. Encode input prompt
309
- prompt_embeds = self._encode_prompt(
310
- prompt,
311
- device,
312
- num_images_per_prompt,
313
- prompt_embeds=prompt_embeds,
314
- )
315
-
316
- # 3.5 encode image
317
- image = self.image_processor.preprocess(image)
318
-
319
- # 4. Prepare timesteps
320
- self.scheduler.set_timesteps(strength, num_inference_steps, lcm_origin_steps)
321
- # timesteps = self.scheduler.timesteps
322
- # timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, 1.0, device)
323
- timesteps = self.scheduler.timesteps
324
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
325
-
326
- print("timesteps: ", timesteps)
327
-
328
- # 5. Prepare latent variable
329
- num_channels_latents = self.unet.config.in_channels
330
- if latents is None:
331
- latents = self.prepare_latents(
332
- image,
333
- latent_timestep,
334
- batch_size * num_images_per_prompt,
335
- num_channels_latents,
336
- height,
337
- width,
338
- prompt_embeds.dtype,
339
- device,
340
- latents,
341
- )
342
- bs = batch_size * num_images_per_prompt
343
-
344
- # 6. Get Guidance Scale Embedding
345
- w = torch.tensor(guidance_scale).repeat(bs)
346
- w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
347
-
348
- # 7. LCM MultiStep Sampling Loop:
349
- with self.progress_bar(total=num_inference_steps) as progress_bar:
350
- for i, t in enumerate(timesteps):
351
- ts = torch.full((bs,), t, device=device, dtype=torch.long)
352
- latents = latents.to(prompt_embeds.dtype)
353
-
354
- # model prediction (v-prediction, eps, x)
355
- model_pred = self.unet(
356
- latents,
357
- ts,
358
- timestep_cond=w_embedding,
359
- encoder_hidden_states=prompt_embeds,
360
- cross_attention_kwargs=cross_attention_kwargs,
361
- return_dict=False,
362
- )[0]
363
-
364
- # compute the previous noisy sample x_t -> x_t-1
365
- latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
366
-
367
- # # call the callback, if provided
368
- # if i == len(timesteps) - 1:
369
- progress_bar.update()
370
-
371
- denoised = denoised.to(prompt_embeds.dtype)
372
- if not output_type == "latent":
373
- image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
374
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
375
- else:
376
- image = denoised
377
- has_nsfw_concept = None
378
-
379
- if has_nsfw_concept is None:
380
- do_denormalize = [True] * image.shape[0]
381
- else:
382
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
383
-
384
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
385
-
386
- if not return_dict:
387
- return (image, has_nsfw_concept)
388
-
389
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
390
-
391
-
392
- @dataclass
393
- # Adapted from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with `pred_original_sample` -> `denoised`
394
- class LCMSchedulerOutput(BaseOutput):
395
- """
396
- Output class for the scheduler's `step` function output.
397
- Args:
398
- prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
399
- Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
400
- denoising loop.
401
- denoised (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
402
- The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
403
- `denoised` can be used to preview progress or for guidance.
404
- """
405
-
406
- prev_sample: torch.Tensor
407
- denoised: Optional[torch.Tensor] = None
408
-
409
-
410
- # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
411
- def betas_for_alpha_bar(
412
- num_diffusion_timesteps,
413
- max_beta=0.999,
414
- alpha_transform_type="cosine",
415
- ):
416
- """
417
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
418
- (1-beta) over time from t = [0,1].
419
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
420
- to that part of the diffusion process.
421
- Args:
422
- num_diffusion_timesteps (`int`): the number of betas to produce.
423
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
424
- prevent singularities.
425
- alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
426
- Choose from `cosine` or `exp`
427
- Returns:
428
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
429
- """
430
- if alpha_transform_type == "cosine":
431
-
432
- def alpha_bar_fn(t):
433
- return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
434
-
435
- elif alpha_transform_type == "exp":
436
-
437
- def alpha_bar_fn(t):
438
- return math.exp(t * -12.0)
439
-
440
- else:
441
- raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
442
-
443
- betas = []
444
- for i in range(num_diffusion_timesteps):
445
- t1 = i / num_diffusion_timesteps
446
- t2 = (i + 1) / num_diffusion_timesteps
447
- betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
448
- return torch.tensor(betas, dtype=torch.float32)
449
-
450
-
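As a quick sanity check on the cosine schedule above, the following standalone sketch re-implements the `"cosine"` branch of `alpha_bar_fn` locally (nothing is imported from this file) and prints the resulting betas and cumulative alphas for a short schedule:

```py
import math

import torch

def cosine_alpha_bar(t: float) -> float:
    # Same transform as the "cosine" branch of alpha_bar_fn above.
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

num_steps, max_beta = 10, 0.999
betas = torch.tensor(
    [
        min(1 - cosine_alpha_bar((i + 1) / num_steps) / cosine_alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]
)

alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
print(betas)           # betas increase towards the end of the schedule, capped at max_beta
print(alphas_cumprod)  # the cumulative product decays towards (but not exactly to) zero
```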
451
- def rescale_zero_terminal_snr(betas):
452
- """
453
- Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
454
- Args:
455
- betas (`torch.Tensor`):
456
- the betas that the scheduler is being initialized with.
457
- Returns:
458
- `torch.Tensor`: rescaled betas with zero terminal SNR
459
- """
460
- # Convert betas to alphas_bar_sqrt
461
- alphas = 1.0 - betas
462
- alphas_cumprod = torch.cumprod(alphas, dim=0)
463
- alphas_bar_sqrt = alphas_cumprod.sqrt()
464
-
465
- # Store old values.
466
- alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
467
- alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
468
-
469
- # Shift so the last timestep is zero.
470
- alphas_bar_sqrt -= alphas_bar_sqrt_T
471
-
472
- # Scale so the first timestep is back to the old value.
473
- alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
474
-
475
- # Convert alphas_bar_sqrt to betas
476
- alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
477
- alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
478
- alphas = torch.cat([alphas_bar[0:1], alphas])
479
- betas = 1 - alphas
480
-
481
- return betas
482
-
483
-
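To see what the rescaling buys, the hedged sketch below starts from a plain linear beta schedule, applies the same shift-and-scale on `sqrt(alpha_bar)` as the function above (re-implemented inline, not imported), and checks that the terminal `alphas_cumprod` entry becomes zero, i.e. the last timestep carries pure noise:

```py
import torch

# A plain linear beta schedule (same defaults as the scheduler below).
betas = torch.linspace(0.0001, 0.02, 1000, dtype=torch.float64)
abar_sqrt = torch.cumprod(1.0 - betas, dim=0).sqrt()
print(abar_sqrt[-1])  # > 0: the raw linear schedule does not reach zero terminal SNR

# Shift so the last value is zero, then rescale so the first value is unchanged
# (Algorithm 1 of https://arxiv.org/pdf/2305.08891.pdf).
first, last = abar_sqrt[0].clone(), abar_sqrt[-1].clone()
abar_sqrt = (abar_sqrt - last) * first / (first - last)

# Convert back to betas and verify the terminal cumulative alpha is now zero.
abar = abar_sqrt**2
alphas = torch.cat([abar[0:1], abar[1:] / abar[:-1]])
new_betas = 1.0 - alphas
print(torch.cumprod(1.0 - new_betas, dim=0)[-1])  # ~0: zero terminal SNR
```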
484
- class LCMSchedulerWithTimestamp(SchedulerMixin, ConfigMixin):
485
- """
486
- This class modifies LCMScheduler so that `set_timesteps` also accepts a `strength` argument, letting the schedule start part-way through the diffusion process for image-to-image generation.
487
-
488
-
489
- `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
490
- non-Markovian guidance.
491
- This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
492
- methods the library implements for all schedulers such as loading and saving.
493
- Args:
494
- num_train_timesteps (`int`, defaults to 1000):
495
- The number of diffusion steps to train the model.
496
- beta_start (`float`, defaults to 0.0001):
497
- The starting `beta` value of inference.
498
- beta_end (`float`, defaults to 0.02):
499
- The final `beta` value.
500
- beta_schedule (`str`, defaults to `"linear"`):
501
- The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
502
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
503
- trained_betas (`np.ndarray`, *optional*):
504
- Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
505
- clip_sample (`bool`, defaults to `True`):
506
- Clip the predicted sample for numerical stability.
507
- clip_sample_range (`float`, defaults to 1.0):
508
- The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
509
- set_alpha_to_one (`bool`, defaults to `True`):
510
- Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
511
- there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
512
- otherwise it uses the alpha value at step 0.
513
- steps_offset (`int`, defaults to 0):
514
- An offset added to the inference steps, as required by some model families.
515
- prediction_type (`str`, defaults to `epsilon`, *optional*):
516
- Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
517
- `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
518
- Video](https://imagen.research.google/video/paper.pdf) paper).
519
- thresholding (`bool`, defaults to `False`):
520
- Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
521
- as Stable Diffusion.
522
- dynamic_thresholding_ratio (`float`, defaults to 0.995):
523
- The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
524
- sample_max_value (`float`, defaults to 1.0):
525
- The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
526
- timestep_spacing (`str`, defaults to `"leading"`):
527
- The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
528
- Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
529
- rescale_betas_zero_snr (`bool`, defaults to `False`):
530
- Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
531
- dark samples instead of limiting it to samples with medium brightness. Loosely related to
532
- [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
533
- """
534
-
535
- # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
536
- order = 1
537
-
538
- @register_to_config
539
- def __init__(
540
- self,
541
- num_train_timesteps: int = 1000,
542
- beta_start: float = 0.0001,
543
- beta_end: float = 0.02,
544
- beta_schedule: str = "linear",
545
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
546
- clip_sample: bool = True,
547
- set_alpha_to_one: bool = True,
548
- steps_offset: int = 0,
549
- prediction_type: str = "epsilon",
550
- thresholding: bool = False,
551
- dynamic_thresholding_ratio: float = 0.995,
552
- clip_sample_range: float = 1.0,
553
- sample_max_value: float = 1.0,
554
- timestep_spacing: str = "leading",
555
- rescale_betas_zero_snr: bool = False,
556
- ):
557
- if trained_betas is not None:
558
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
559
- elif beta_schedule == "linear":
560
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
561
- elif beta_schedule == "scaled_linear":
562
- # this schedule is very specific to the latent diffusion model.
563
- self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
564
- elif beta_schedule == "squaredcos_cap_v2":
565
- # Glide cosine schedule
566
- self.betas = betas_for_alpha_bar(num_train_timesteps)
567
- else:
568
- raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
569
-
570
- # Rescale for zero SNR
571
- if rescale_betas_zero_snr:
572
- self.betas = rescale_zero_terminal_snr(self.betas)
573
-
574
- self.alphas = 1.0 - self.betas
575
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
576
-
577
- # At every step in ddim, we are looking into the previous alphas_cumprod
578
- # For the final step, there is no previous alphas_cumprod because we are already at 0
579
- # `set_alpha_to_one` decides whether we set this parameter simply to one or
580
- # whether we use the final alpha of the "non-previous" one.
581
- self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
582
-
583
- # standard deviation of the initial noise distribution
584
- self.init_noise_sigma = 1.0
585
-
586
- # setable values
587
- self.num_inference_steps = None
588
- self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
589
-
590
- def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
591
- """
592
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
593
- current timestep.
594
- Args:
595
- sample (`torch.Tensor`):
596
- The input sample.
597
- timestep (`int`, *optional*):
598
- The current timestep in the diffusion chain.
599
- Returns:
600
- `torch.Tensor`:
601
- A scaled input sample.
602
- """
603
- return sample
604
-
605
- def _get_variance(self, timestep, prev_timestep):
606
- alpha_prod_t = self.alphas_cumprod[timestep]
607
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
608
- beta_prod_t = 1 - alpha_prod_t
609
- beta_prod_t_prev = 1 - alpha_prod_t_prev
610
-
611
- variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
612
-
613
- return variance
614
-
615
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
616
- def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:
617
- """
618
- "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
619
- prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
620
- s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
621
- pixels from saturation at each step. We find that dynamic thresholding results in significantly better
622
- photorealism as well as better image-text alignment, especially when using very large guidance weights."
623
- https://arxiv.org/abs/2205.11487
624
- """
625
- dtype = sample.dtype
626
- batch_size, channels, height, width = sample.shape
627
-
628
- if dtype not in (torch.float32, torch.float64):
629
- sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
630
-
631
- # Flatten sample for doing quantile calculation along each image
632
- sample = sample.reshape(batch_size, channels * height * width)
633
-
634
- abs_sample = sample.abs() # "a certain percentile absolute pixel value"
635
-
636
- s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
637
- s = torch.clamp(
638
- s, min=1, max=self.config.sample_max_value
639
- ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
640
-
641
- s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
642
- sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
643
-
644
- sample = sample.reshape(batch_size, channels, height, width)
645
- sample = sample.to(dtype)
646
-
647
- return sample
648
-
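The thresholding above is a per-sample recipe: take a high percentile of `|x0|`, clamp to `[-s, s]`, then divide by `s` so outliers are pulled in without crushing typical pixel values. A minimal standalone illustration on fake pixel-space predictions (it deliberately omits the `sample_max_value` cap used by the scheduler):

```py
import torch

sample = torch.randn(2, 3, 8, 8) * 2.0              # pretend x0 predictions with outliers
flat = sample.reshape(sample.shape[0], -1)

s = torch.quantile(flat.abs(), 0.995, dim=1)         # per-image percentile of |x0|
s = torch.clamp(s, min=1.0).unsqueeze(1)             # never tighter than plain [-1, 1] clipping
thresholded = (torch.clamp(flat, -s, s) / s).reshape_as(sample)

print(sample.abs().max(), thresholded.abs().max())   # the second value is <= 1
```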
649
- def set_timesteps(
650
- self, strength, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None
651
- ):
652
- """
653
- Sets the discrete timesteps used for the diffusion chain (to be run before inference).
654
- Args:
655
- num_inference_steps (`int`):
656
- The number of diffusion steps used when generating samples with a pre-trained model.
657
- """
658
-
659
- if num_inference_steps > self.config.num_train_timesteps:
660
- raise ValueError(
661
- f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
662
- f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
663
- f" maximal {self.config.num_train_timesteps} timesteps."
664
- )
665
-
666
- self.num_inference_steps = num_inference_steps
667
-
668
- # LCM Timesteps Setting: # Linear Spacing
669
- c = self.config.num_train_timesteps // lcm_origin_steps
670
- lcm_origin_timesteps = (
671
- np.asarray(list(range(1, int(lcm_origin_steps * strength) + 1))) * c - 1
672
- ) # LCM Training Steps Schedule
673
- skipping_step = len(lcm_origin_timesteps) // num_inference_steps
674
- timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
675
-
676
- self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
677
-
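Because the schedule is just the LCM training steps restricted by `strength` and then subsampled, it is easy to reproduce outside the scheduler. The sketch below recomputes it with the same defaults the pipeline above uses (`num_train_timesteps=1000`, `lcm_origin_steps=50`, `strength=0.8`, `num_inference_steps=4`); it only illustrates the indexing and is not a public API:

```py
import numpy as np

num_train_timesteps, lcm_origin_steps = 1000, 50
strength, num_inference_steps = 0.8, 4

c = num_train_timesteps // lcm_origin_steps  # 20
# Training-time schedule restricted by `strength` (img2img starts part-way in).
lcm_origin_timesteps = np.asarray(range(1, int(lcm_origin_steps * strength) + 1)) * c - 1
skipping_step = len(lcm_origin_timesteps) // num_inference_steps
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]

print(timesteps)  # [799 599 399 199]: four evenly spaced steps, starting at t=799
```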
678
- def get_scalings_for_boundary_condition_discrete(self, t):
679
- self.sigma_data = 0.5 # Default: 0.5
680
-
681
- # Dividing t by 0.1 sharpens the decay, so c_skip behaves almost like a delta function at t = 0.
682
- c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
683
- c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
684
- return c_skip, c_out
685
-
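These two scalings enforce the consistency-model boundary condition: at `t = 0` the model must return its input unchanged (`c_skip = 1`, `c_out = 0`), while for large `t` the parameterised output dominates. A minimal numeric check with the same `sigma_data = 0.5` and `t / 0.1` rescaling as above:

```py
sigma_data = 0.5

def scalings(t: float):
    # Mirrors get_scalings_for_boundary_condition_discrete above.
    c_skip = sigma_data**2 / ((t / 0.1) ** 2 + sigma_data**2)
    c_out = (t / 0.1) / ((t / 0.1) ** 2 + sigma_data**2) ** 0.5
    return c_skip, c_out

for t in (0, 19, 199, 999):
    c_skip, c_out = scalings(t)
    print(f"t={t:4d}  c_skip={c_skip:.4f}  c_out={c_out:.4f}")
# t=0 gives c_skip=1.0 and c_out=0.0 (identity at the boundary);
# for large t, c_skip -> 0 and c_out -> 1.
```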
686
- def step(
687
- self,
688
- model_output: torch.Tensor,
689
- timeindex: int,
690
- timestep: int,
691
- sample: torch.Tensor,
692
- eta: float = 0.0,
693
- use_clipped_model_output: bool = False,
694
- generator=None,
695
- variance_noise: Optional[torch.Tensor] = None,
696
- return_dict: bool = True,
697
- ) -> Union[LCMSchedulerOutput, Tuple]:
698
- """
699
- Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
700
- process from the learned model outputs (most often the predicted noise).
701
- Args:
702
- model_output (`torch.Tensor`):
703
- The direct output from learned diffusion model.
704
- timestep (`float`):
705
- The current discrete timestep in the diffusion chain.
706
- sample (`torch.Tensor`):
707
- A current instance of a sample created by the diffusion process.
708
- eta (`float`):
709
- The weight of noise for added noise in diffusion step.
710
- use_clipped_model_output (`bool`, defaults to `False`):
711
- If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
712
- because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
713
- clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
714
- `use_clipped_model_output` has no effect.
715
- generator (`torch.Generator`, *optional*):
716
- A random number generator.
717
- variance_noise (`torch.Tensor`):
718
- Alternative to generating noise with `generator` by directly providing the noise for the variance
719
- itself. Useful for methods such as [`CycleDiffusion`].
720
- return_dict (`bool`, *optional*, defaults to `True`):
721
- Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
722
- Returns:
723
- [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
724
- If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
725
- tuple is returned where the first element is the sample tensor.
726
- """
727
- if self.num_inference_steps is None:
728
- raise ValueError(
729
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
730
- )
731
-
732
- # 1. get previous step value
733
- prev_timeindex = timeindex + 1
734
- if prev_timeindex < len(self.timesteps):
735
- prev_timestep = self.timesteps[prev_timeindex]
736
- else:
737
- prev_timestep = timestep
738
-
739
- # 2. compute alphas, betas
740
- alpha_prod_t = self.alphas_cumprod[timestep]
741
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
742
-
743
- beta_prod_t = 1 - alpha_prod_t
744
- beta_prod_t_prev = 1 - alpha_prod_t_prev
745
-
746
- # 3. Get scalings for boundary conditions
747
- c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
748
-
749
- # 4. Different Parameterization:
750
- parameterization = self.config.prediction_type
751
-
752
- if parameterization == "epsilon": # noise-prediction
753
- pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
754
-
755
- elif parameterization == "sample": # x-prediction
756
- pred_x0 = model_output
757
-
758
- elif parameterization == "v_prediction": # v-prediction
759
- pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
- else:
- raise ValueError(f"Unsupported prediction_type: {parameterization}")
760
-
761
- # 5. Denoise model output using boundary conditions
762
- denoised = c_out * pred_x0 + c_skip * sample
763
-
764
- # 6. Sample z ~ N(0, I), For MultiStep Inference
765
- # Noise is not used for one-step sampling.
766
- if len(self.timesteps) > 1:
767
- noise = torch.randn(model_output.shape).to(model_output.device)
768
- prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
769
- else:
770
- prev_sample = denoised
771
-
772
- if not return_dict:
773
- return (prev_sample, denoised)
774
-
775
- return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
776
-
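For the epsilon parameterisation, the update above reduces to three closed-form lines: recover the `x0` estimate, blend it with the current sample through the boundary scalings, and re-noise to the previous LCM timestep. A standalone 1-D sketch of that arithmetic (random stand-ins for the sample and the model output, not the scheduler API):

```py
import torch

alphas_cumprod = torch.cumprod(1.0 - torch.linspace(1e-4, 2e-2, 1000), dim=0)
t, t_prev = 799, 599
x = torch.randn(4)            # current noisy sample
eps_pred = torch.randn(4)     # stand-in for the model's noise prediction

a_t, a_prev = alphas_cumprod[t], alphas_cumprod[t_prev]
pred_x0 = (x - (1 - a_t).sqrt() * eps_pred) / a_t.sqrt()

sigma_data = 0.5
c_skip = sigma_data**2 / ((t / 0.1) ** 2 + sigma_data**2)
c_out = (t / 0.1) / ((t / 0.1) ** 2 + sigma_data**2) ** 0.5
denoised = c_out * pred_x0 + c_skip * x

noise = torch.randn_like(x)   # multi-step LCM re-noises to the previous timestep
prev_sample = a_prev.sqrt() * denoised + (1 - a_prev).sqrt() * noise
print(denoised.shape, prev_sample.shape)
```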
777
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
778
- def add_noise(
779
- self,
780
- original_samples: torch.Tensor,
781
- noise: torch.Tensor,
782
- timesteps: torch.IntTensor,
783
- ) -> torch.Tensor:
784
- # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
785
- alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
786
- timesteps = timesteps.to(original_samples.device)
787
-
788
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
789
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
790
- while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
791
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
792
-
793
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
794
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
795
- while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
796
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
797
-
798
- noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
799
- return noisy_samples
800
-
801
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
802
- def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor:
803
- # Make sure alphas_cumprod and timestep have same device and dtype as sample
804
- alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
805
- timesteps = timesteps.to(sample.device)
806
-
807
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
808
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
809
- while len(sqrt_alpha_prod.shape) < len(sample.shape):
810
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
811
-
812
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
813
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
814
- while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
815
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
816
-
817
- velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
818
- return velocity
819
-
820
- def __len__(self):
821
- return self.config.num_train_timesteps
 
latent_consistency_interpolate.py DELETED
@@ -1,995 +0,0 @@
1
- import inspect
2
- from typing import Any, Callable, Dict, List, Optional, Union
3
-
4
- import numpy as np
5
- import torch
6
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
7
-
8
- from diffusers.image_processor import VaeImageProcessor
9
- from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
10
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
11
- from diffusers.models.lora import adjust_lora_scale_text_encoder
12
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
13
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
14
- from diffusers.schedulers import LCMScheduler
15
- from diffusers.utils import (
16
- USE_PEFT_BACKEND,
17
- deprecate,
18
- logging,
19
- replace_example_docstring,
20
- scale_lora_layers,
21
- unscale_lora_layers,
22
- )
23
- from diffusers.utils.torch_utils import randn_tensor
24
-
25
-
26
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
27
-
28
- EXAMPLE_DOC_STRING = """
29
- Examples:
30
- ```py
31
- >>> import torch
32
- >>> import numpy as np
33
-
34
- >>> from diffusers import DiffusionPipeline
35
-
36
- >>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_interpolate")
37
- >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality.
38
- >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32)
39
-
40
- >>> prompts = ["A cat", "A dog", "A horse"]
41
- >>> num_inference_steps = 4
42
- >>> num_interpolation_steps = 24
43
- >>> seed = 1337
44
-
45
- >>> torch.manual_seed(seed)
46
- >>> np.random.seed(seed)
47
-
48
- >>> images = pipe(
49
- prompt=prompts,
50
- height=512,
51
- width=512,
52
- num_inference_steps=num_inference_steps,
53
- num_interpolation_steps=num_interpolation_steps,
54
- guidance_scale=8.0,
55
- embedding_interpolation_type="lerp",
56
- latent_interpolation_type="slerp",
57
- process_batch_size=4, # Make it higher or lower based on your GPU memory
58
- generator=torch.Generator().manual_seed(seed),
59
- )
60
-
61
- >>> # Save the images as a video
62
- >>> import imageio
63
- >>> from PIL import Image
64
-
65
- >>> def pil_to_video(images: List[Image.Image], filename: str, fps: int = 60) -> None:
66
- frames = [np.array(image) for image in images]
67
- with imageio.get_writer(filename, fps=fps) as video_writer:
68
- for frame in frames:
69
- video_writer.append_data(frame)
70
-
71
- >>> pil_to_video(images, "lcm_interpolate.mp4", fps=24)
72
- ```
73
- """
74
-
75
-
76
- def lerp(
77
- v0: Union[torch.Tensor, np.ndarray],
78
- v1: Union[torch.Tensor, np.ndarray],
79
- t: Union[float, torch.Tensor, np.ndarray],
80
- ) -> Union[torch.Tensor, np.ndarray]:
81
- """
82
- Linearly interpolate between two vectors/tensors.
83
-
84
- Args:
85
- v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor.
86
- v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor.
87
- t: (`float`, `torch.Tensor`, or `np.ndarray`):
88
- Interpolation factor. If float, must be between 0 and 1. If np.ndarray or
89
- torch.Tensor, must be one dimensional with values between 0 and 1.
90
-
91
- Returns:
92
- Union[torch.Tensor, np.ndarray]
93
- Interpolated vector/tensor between v0 and v1.
94
- """
95
- inputs_are_torch = False
96
- t_is_float = False
97
-
98
- if isinstance(v0, torch.Tensor):
99
- inputs_are_torch = True
100
- input_device = v0.device
101
- v0 = v0.cpu().numpy()
102
- v1 = v1.cpu().numpy()
103
-
104
- if isinstance(t, torch.Tensor):
105
- inputs_are_torch = True
106
- input_device = t.device
107
- t = t.cpu().numpy()
108
- elif isinstance(t, float):
109
- t_is_float = True
110
- t = np.array([t])
111
-
112
- t = t[..., None]
113
- v0 = v0[None, ...]
114
- v1 = v1[None, ...]
115
- v2 = (1 - t) * v0 + t * v1
116
-
117
- if t_is_float and v0.ndim > 1:
118
- assert v2.shape[0] == 1
119
- v2 = np.squeeze(v2, axis=0)
120
- if inputs_are_torch:
121
- v2 = torch.from_numpy(v2).to(input_device)
122
-
123
- return v2
124
-
125
-
126
- def slerp(
127
- v0: Union[torch.Tensor, np.ndarray],
128
- v1: Union[torch.Tensor, np.ndarray],
129
- t: Union[float, torch.Tensor, np.ndarray],
130
- DOT_THRESHOLD=0.9995,
131
- ) -> Union[torch.Tensor, np.ndarray]:
132
- """
133
- Spherical linear interpolation between two vectors/tensors.
134
-
135
- Args:
136
- v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor.
137
- v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor.
138
- t: (`float`, `torch.Tensor`, or `np.ndarray`):
139
- Interpolation factor. If float, must be between 0 and 1. If np.ndarray or
140
- torch.Tensor, must be one dimensional with values between 0 and 1.
141
- DOT_THRESHOLD (`float`, *optional*, default=0.9995):
142
- Threshold for when to use linear interpolation instead of spherical interpolation.
143
-
144
- Returns:
145
- `torch.Tensor` or `np.ndarray`:
146
- Interpolated vector/tensor between v0 and v1.
147
- """
148
- inputs_are_torch = False
149
- t_is_float = False
150
-
151
- if isinstance(v0, torch.Tensor):
152
- inputs_are_torch = True
153
- input_device = v0.device
154
- v0 = v0.cpu().numpy()
155
- v1 = v1.cpu().numpy()
156
-
157
- if isinstance(t, torch.Tensor):
158
- inputs_are_torch = True
159
- input_device = t.device
160
- t = t.cpu().numpy()
161
- elif isinstance(t, float):
162
- t_is_float = True
163
- t = np.array([t], dtype=v0.dtype)
164
-
165
- dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
166
- if np.abs(dot) > DOT_THRESHOLD:
167
- # v0 and v1 are close to parallel
168
- # Use linear interpolation instead
169
- v2 = lerp(v0, v1, t)
170
- else:
171
- theta_0 = np.arccos(dot)
172
- sin_theta_0 = np.sin(theta_0)
173
- theta_t = theta_0 * t
174
- sin_theta_t = np.sin(theta_t)
175
- s0 = np.sin(theta_0 - theta_t) / sin_theta_0
176
- s1 = sin_theta_t / sin_theta_0
177
- s0 = s0[..., None]
178
- s1 = s1[..., None]
179
- v0 = v0[None, ...]
180
- v1 = v1[None, ...]
181
- v2 = s0 * v0 + s1 * v1
182
-
183
- if t_is_float and v0.ndim > 1:
184
- assert v2.shape[0] == 1
185
- v2 = np.squeeze(v2, axis=0)
186
- if inputs_are_torch:
187
- v2 = torch.from_numpy(v2).to(input_device)
188
-
189
- return v2
190
-
191
-
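The practical difference between the two interpolants is easiest to see on unit vectors: `lerp` shortens the vector half-way, while `slerp` keeps it on the sphere, which is why latents are interpolated spherically below. A small self-contained check with NumPy (it does not call the helpers above; it just replays the same formulas for two orthogonal unit vectors):

```py
import numpy as np

v0 = np.array([1.0, 0.0])
v1 = np.array([0.0, 1.0])
t = 0.5

lerped = (1 - t) * v0 + t * v1
theta = np.arccos(np.clip(np.dot(v0, v1), -1.0, 1.0))
slerped = (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)

print(np.linalg.norm(lerped))   # ~0.707: linear interpolation leaves the unit sphere
print(np.linalg.norm(slerped))  # ~1.0:   spherical interpolation stays on it
```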
192
- class LatentConsistencyModelWalkPipeline(
193
- DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
194
- ):
195
- r"""
196
- Pipeline for text-to-image generation using a latent consistency model.
197
-
198
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
199
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
200
-
201
- The pipeline also inherits the following loading methods:
202
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
203
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
204
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
205
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
206
-
207
- Args:
208
- vae ([`AutoencoderKL`]):
209
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
210
- text_encoder ([`~transformers.CLIPTextModel`]):
211
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
212
- tokenizer ([`~transformers.CLIPTokenizer`]):
213
- A `CLIPTokenizer` to tokenize text.
214
- unet ([`UNet2DConditionModel`]):
215
- A `UNet2DConditionModel` to denoise the encoded image latents.
216
- scheduler ([`SchedulerMixin`]):
217
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only
218
- supports [`LCMScheduler`].
219
- safety_checker ([`StableDiffusionSafetyChecker`]):
220
- Classification module that estimates whether generated images could be considered offensive or harmful.
221
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
222
- about a model's potential harms.
223
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
224
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
225
- requires_safety_checker (`bool`, *optional*, defaults to `True`):
226
- Whether the pipeline requires a safety checker component.
227
- """
228
-
229
- model_cpu_offload_seq = "text_encoder->unet->vae"
230
- _optional_components = ["safety_checker", "feature_extractor"]
231
- _exclude_from_cpu_offload = ["safety_checker"]
232
- _callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"]
233
-
234
- def __init__(
235
- self,
236
- vae: AutoencoderKL,
237
- text_encoder: CLIPTextModel,
238
- tokenizer: CLIPTokenizer,
239
- unet: UNet2DConditionModel,
240
- scheduler: LCMScheduler,
241
- safety_checker: StableDiffusionSafetyChecker,
242
- feature_extractor: CLIPImageProcessor,
243
- requires_safety_checker: bool = True,
244
- ):
245
- super().__init__()
246
-
247
- if safety_checker is None and requires_safety_checker:
248
- logger.warning(
249
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
250
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
251
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
252
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
253
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
254
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
255
- )
256
-
257
- if safety_checker is not None and feature_extractor is None:
258
- raise ValueError(
259
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
260
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
261
- )
262
-
263
- self.register_modules(
264
- vae=vae,
265
- text_encoder=text_encoder,
266
- tokenizer=tokenizer,
267
- unet=unet,
268
- scheduler=scheduler,
269
- safety_checker=safety_checker,
270
- feature_extractor=feature_extractor,
271
- )
272
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
273
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
274
- self.register_to_config(requires_safety_checker=requires_safety_checker)
275
-
276
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
277
- def encode_prompt(
278
- self,
279
- prompt,
280
- device,
281
- num_images_per_prompt,
282
- do_classifier_free_guidance,
283
- negative_prompt=None,
284
- prompt_embeds: Optional[torch.Tensor] = None,
285
- negative_prompt_embeds: Optional[torch.Tensor] = None,
286
- lora_scale: Optional[float] = None,
287
- clip_skip: Optional[int] = None,
288
- ):
289
- r"""
290
- Encodes the prompt into text encoder hidden states.
291
-
292
- Args:
293
- prompt (`str` or `List[str]`, *optional*):
294
- prompt to be encoded
295
- device: (`torch.device`):
296
- torch device
297
- num_images_per_prompt (`int`):
298
- number of images that should be generated per prompt
299
- do_classifier_free_guidance (`bool`):
300
- whether to use classifier free guidance or not
301
- negative_prompt (`str` or `List[str]`, *optional*):
302
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
303
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
304
- less than `1`).
305
- prompt_embeds (`torch.Tensor`, *optional*):
306
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
307
- provided, text embeddings will be generated from `prompt` input argument.
308
- negative_prompt_embeds (`torch.Tensor`, *optional*):
309
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
310
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
311
- argument.
312
- lora_scale (`float`, *optional*):
313
- A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
314
- clip_skip (`int`, *optional*):
315
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
316
- the output of the pre-final layer will be used for computing the prompt embeddings.
317
- """
318
- # set lora scale so that monkey patched LoRA
319
- # function of text encoder can correctly access it
320
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
321
- self._lora_scale = lora_scale
322
-
323
- # dynamically adjust the LoRA scale
324
- if not USE_PEFT_BACKEND:
325
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
326
- else:
327
- scale_lora_layers(self.text_encoder, lora_scale)
328
-
329
- if prompt is not None and isinstance(prompt, str):
330
- batch_size = 1
331
- elif prompt is not None and isinstance(prompt, list):
332
- batch_size = len(prompt)
333
- else:
334
- batch_size = prompt_embeds.shape[0]
335
-
336
- if prompt_embeds is None:
337
- # textual inversion: process multi-vector tokens if necessary
338
- if isinstance(self, TextualInversionLoaderMixin):
339
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
340
-
341
- text_inputs = self.tokenizer(
342
- prompt,
343
- padding="max_length",
344
- max_length=self.tokenizer.model_max_length,
345
- truncation=True,
346
- return_tensors="pt",
347
- )
348
- text_input_ids = text_inputs.input_ids
349
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
350
-
351
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
352
- text_input_ids, untruncated_ids
353
- ):
354
- removed_text = self.tokenizer.batch_decode(
355
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
356
- )
357
- logger.warning(
358
- "The following part of your input was truncated because CLIP can only handle sequences up to"
359
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
360
- )
361
-
362
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
363
- attention_mask = text_inputs.attention_mask.to(device)
364
- else:
365
- attention_mask = None
366
-
367
- if clip_skip is None:
368
- prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
369
- prompt_embeds = prompt_embeds[0]
370
- else:
371
- prompt_embeds = self.text_encoder(
372
- text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
373
- )
374
- # Access the `hidden_states` first, that contains a tuple of
375
- # all the hidden states from the encoder layers. Then index into
376
- # the tuple to access the hidden states from the desired layer.
377
- prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
378
- # We also need to apply the final LayerNorm here to not mess with the
379
- # representations. The `last_hidden_states` that we typically use for
380
- # obtaining the final prompt representations passes through the LayerNorm
381
- # layer.
382
- prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
383
-
384
- if self.text_encoder is not None:
385
- prompt_embeds_dtype = self.text_encoder.dtype
386
- elif self.unet is not None:
387
- prompt_embeds_dtype = self.unet.dtype
388
- else:
389
- prompt_embeds_dtype = prompt_embeds.dtype
390
-
391
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
392
-
393
- bs_embed, seq_len, _ = prompt_embeds.shape
394
- # duplicate text embeddings for each generation per prompt, using mps friendly method
395
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
396
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
397
-
398
- # get unconditional embeddings for classifier free guidance
399
- if do_classifier_free_guidance and negative_prompt_embeds is None:
400
- uncond_tokens: List[str]
401
- if negative_prompt is None:
402
- uncond_tokens = [""] * batch_size
403
- elif prompt is not None and type(prompt) is not type(negative_prompt):
404
- raise TypeError(
405
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
406
- f" {type(prompt)}."
407
- )
408
- elif isinstance(negative_prompt, str):
409
- uncond_tokens = [negative_prompt]
410
- elif batch_size != len(negative_prompt):
411
- raise ValueError(
412
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
413
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
414
- " the batch size of `prompt`."
415
- )
416
- else:
417
- uncond_tokens = negative_prompt
418
-
419
- # textual inversion: process multi-vector tokens if necessary
420
- if isinstance(self, TextualInversionLoaderMixin):
421
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
422
-
423
- max_length = prompt_embeds.shape[1]
424
- uncond_input = self.tokenizer(
425
- uncond_tokens,
426
- padding="max_length",
427
- max_length=max_length,
428
- truncation=True,
429
- return_tensors="pt",
430
- )
431
-
432
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
433
- attention_mask = uncond_input.attention_mask.to(device)
434
- else:
435
- attention_mask = None
436
-
437
- negative_prompt_embeds = self.text_encoder(
438
- uncond_input.input_ids.to(device),
439
- attention_mask=attention_mask,
440
- )
441
- negative_prompt_embeds = negative_prompt_embeds[0]
442
-
443
- if do_classifier_free_guidance:
444
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
445
- seq_len = negative_prompt_embeds.shape[1]
446
-
447
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
448
-
449
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
450
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
451
-
452
- if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
453
- # Retrieve the original scale by scaling back the LoRA layers
454
- unscale_lora_layers(self.text_encoder, lora_scale)
455
-
456
- return prompt_embeds, negative_prompt_embeds
457
-
458
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
459
- def run_safety_checker(self, image, device, dtype):
460
- if self.safety_checker is None:
461
- has_nsfw_concept = None
462
- else:
463
- if torch.is_tensor(image):
464
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
465
- else:
466
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
467
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
468
- image, has_nsfw_concept = self.safety_checker(
469
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
470
- )
471
- return image, has_nsfw_concept
472
-
473
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
474
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
475
- shape = (
476
- batch_size,
477
- num_channels_latents,
478
- int(height) // self.vae_scale_factor,
479
- int(width) // self.vae_scale_factor,
480
- )
481
- if isinstance(generator, list) and len(generator) != batch_size:
482
- raise ValueError(
483
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
484
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
485
- )
486
-
487
- if latents is None:
488
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
489
- else:
490
- latents = latents.to(device)
491
-
492
- # scale the initial noise by the standard deviation required by the scheduler
493
- latents = latents * self.scheduler.init_noise_sigma
494
- return latents
495
-
496
- def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
497
- """
498
- See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
499
-
500
- Args:
501
- w (`torch.Tensor`):
502
- guidance scale values (one per sample in the batch) at which to generate the embedding vectors
503
- embedding_dim (`int`, *optional*, defaults to 512):
504
- dimension of the embeddings to generate
505
- dtype:
506
- data type of the generated embeddings
507
-
508
- Returns:
509
- `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`
510
- """
511
- assert len(w.shape) == 1
512
- w = w * 1000.0
513
-
514
- half_dim = embedding_dim // 2
515
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
516
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
517
- emb = w.to(dtype)[:, None] * emb[None, :]
518
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
519
- if embedding_dim % 2 == 1: # zero pad
520
- emb = torch.nn.functional.pad(emb, (0, 1))
521
- assert emb.shape == (w.shape[0], embedding_dim)
522
- return emb
523
-
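This is the standard sinusoidal timestep embedding applied to `guidance_scale * 1000`; it is how the distilled LCM UNet is conditioned on the guidance weight through `timestep_cond` instead of running a second, unconditional forward pass. A standalone shape check with the same formula (the batch size and embedding size below are only illustrative):

```py
import torch

w = torch.tensor([8.0, 8.0])   # one guidance scale per sample in the batch
embedding_dim = 256            # illustrative size of the UNet's time conditioning projection
w = w * 1000.0

half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = w[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)

print(emb.shape)  # torch.Size([2, 256]) -> passed to the UNet as `timestep_cond`
```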
524
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
525
- def prepare_extra_step_kwargs(self, generator, eta):
526
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
527
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
528
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
529
- # and should be between [0, 1]
530
-
531
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
532
- extra_step_kwargs = {}
533
- if accepts_eta:
534
- extra_step_kwargs["eta"] = eta
535
-
536
- # check if the scheduler accepts generator
537
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
538
- if accepts_generator:
539
- extra_step_kwargs["generator"] = generator
540
- return extra_step_kwargs
541
-
542
- # Currently StableDiffusionPipeline.check_inputs with negative prompt stuff removed
543
- def check_inputs(
544
- self,
545
- prompt: Union[str, List[str]],
546
- height: int,
547
- width: int,
548
- callback_steps: int,
549
- prompt_embeds: Optional[torch.Tensor] = None,
550
- callback_on_step_end_tensor_inputs=None,
551
- ):
552
- if height % 8 != 0 or width % 8 != 0:
553
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
554
-
555
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
556
- raise ValueError(
557
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
558
- f" {type(callback_steps)}."
559
- )
560
-
561
- if callback_on_step_end_tensor_inputs is not None and not all(
562
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
563
- ):
564
- raise ValueError(
565
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
566
- )
567
-
568
- if prompt is not None and prompt_embeds is not None:
569
- raise ValueError(
570
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
571
- " only forward one of the two."
572
- )
573
- elif prompt is None and prompt_embeds is None:
574
- raise ValueError(
575
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
576
- )
577
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
578
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
579
-
580
- @torch.no_grad()
581
- def interpolate_embedding(
582
- self,
583
- start_embedding: torch.Tensor,
584
- end_embedding: torch.Tensor,
585
- num_interpolation_steps: Union[int, List[int]],
586
- interpolation_type: str,
587
- ) -> torch.Tensor:
588
- if interpolation_type == "lerp":
589
- interpolation_fn = lerp
590
- elif interpolation_type == "slerp":
591
- interpolation_fn = slerp
592
- else:
593
- raise ValueError(
594
- f"embedding_interpolation_type must be one of ['lerp', 'slerp'], got {interpolation_type}."
595
- )
596
-
597
- embedding = torch.cat([start_embedding, end_embedding])
598
- steps = torch.linspace(0, 1, num_interpolation_steps, dtype=embedding.dtype).cpu().numpy()
599
- steps = np.expand_dims(steps, axis=tuple(range(1, embedding.ndim)))
600
- interpolations = []
601
-
602
- # Interpolate between text embeddings
603
- # TODO(aryan): Think of a better way of doing this
604
- # See if it can be done parallelly instead
605
- for i in range(embedding.shape[0] - 1):
606
- interpolations.append(interpolation_fn(embedding[i], embedding[i + 1], steps).squeeze(dim=1))
607
-
608
- interpolations = torch.cat(interpolations)
609
- return interpolations
610
-
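For `N` encoded prompts and `k` interpolation steps, the loop above produces `N - 1` consecutive segments of `k` frames each, concatenated along the batch dimension. A shape-only sketch with dummy tensors and a plain lerp (the 77x768 CLIP embedding shape is only illustrative):

```py
import torch

num_prompts, steps = 3, 8
embeddings = torch.randn(num_prompts, 77, 768)    # stand-in for encoded prompts
t = torch.linspace(0, 1, steps).view(steps, 1, 1)

segments = []
for i in range(num_prompts - 1):
    start, end = embeddings[i], embeddings[i + 1]
    segments.append((1 - t) * start + t * end)    # plain lerp per segment

frames = torch.cat(segments)
print(frames.shape)  # torch.Size([16, 77, 768]) -> (num_prompts - 1) * steps frames
```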
611
- @torch.no_grad()
612
- def interpolate_latent(
613
- self,
614
- start_latent: torch.Tensor,
615
- end_latent: torch.Tensor,
616
- num_interpolation_steps: Union[int, List[int]],
617
- interpolation_type: str,
618
- ) -> torch.Tensor:
619
- if interpolation_type == "lerp":
620
- interpolation_fn = lerp
621
- elif interpolation_type == "slerp":
622
- interpolation_fn = slerp
- else:
- raise ValueError(
- f"latent_interpolation_type must be one of ['lerp', 'slerp'], got {interpolation_type}."
- )
623
-
624
- latent = torch.cat([start_latent, end_latent])
625
- steps = torch.linspace(0, 1, num_interpolation_steps, dtype=latent.dtype).cpu().numpy()
626
- steps = np.expand_dims(steps, axis=tuple(range(1, latent.ndim)))
627
- interpolations = []
628
-
629
- # Interpolate between latents
630
- # TODO: Think of a better way of doing this
631
- # See if it can be done parallelly instead
632
- for i in range(latent.shape[0] - 1):
633
- interpolations.append(interpolation_fn(latent[i], latent[i + 1], steps).squeeze(dim=1))
634
-
635
- return torch.cat(interpolations)
636
-
637
- @property
638
- def guidance_scale(self):
639
- return self._guidance_scale
640
-
641
- @property
642
- def cross_attention_kwargs(self):
643
- return self._cross_attention_kwargs
644
-
645
- @property
646
- def clip_skip(self):
647
- return self._clip_skip
648
-
649
- @property
650
- def num_timesteps(self):
651
- return self._num_timesteps
652
-
653
- @torch.no_grad()
654
- @replace_example_docstring(EXAMPLE_DOC_STRING)
655
- def __call__(
656
- self,
657
- prompt: Union[str, List[str]] = None,
658
- height: Optional[int] = None,
659
- width: Optional[int] = None,
660
- num_inference_steps: int = 4,
661
- num_interpolation_steps: int = 8,
662
- original_inference_steps: int = None,
663
- guidance_scale: float = 8.5,
664
- num_images_per_prompt: Optional[int] = 1,
665
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
666
- latents: Optional[torch.Tensor] = None,
667
- prompt_embeds: Optional[torch.Tensor] = None,
668
- output_type: Optional[str] = "pil",
669
- return_dict: bool = True,
670
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
671
- clip_skip: Optional[int] = None,
672
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
673
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
674
- embedding_interpolation_type: str = "lerp",
675
- latent_interpolation_type: str = "slerp",
676
- process_batch_size: int = 4,
677
- **kwargs,
678
- ):
679
- r"""
680
- The call function to the pipeline for generation.
681
-
682
- Args:
683
- prompt (`str` or `List[str]`, *optional*):
684
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
685
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
686
- The height in pixels of the generated image.
687
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
688
- The width in pixels of the generated image.
689
- num_inference_steps (`int`, *optional*, defaults to 4):
690
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
691
- expense of slower inference.
692
- original_inference_steps (`int`, *optional*):
693
- The original number of inference steps used to generate a linearly-spaced timestep schedule, from which
694
- we will draw `num_inference_steps` evenly spaced timesteps as our final timestep schedule,
695
- following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the
696
- scheduler's `original_inference_steps` attribute.
697
- guidance_scale (`float`, *optional*, defaults to 8.5):
698
- A higher guidance scale value encourages the model to generate images closely linked to the text
699
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
700
- Note that the original latent consistency models paper uses a different CFG formulation where the
701
- guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale >
702
- 0`).
703
- num_images_per_prompt (`int`, *optional*, defaults to 1):
704
- The number of images to generate per prompt.
705
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
706
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
707
- generation deterministic.
708
- latents (`torch.Tensor`, *optional*):
709
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
710
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
711
- tensor is generated by sampling using the supplied random `generator`.
712
- prompt_embeds (`torch.Tensor`, *optional*):
713
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
714
- provided, text embeddings are generated from the `prompt` input argument.
715
- output_type (`str`, *optional*, defaults to `"pil"`):
716
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
717
- return_dict (`bool`, *optional*, defaults to `True`):
718
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
719
- plain tuple.
720
- cross_attention_kwargs (`dict`, *optional*):
721
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
722
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
723
- clip_skip (`int`, *optional*):
724
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
725
- the output of the pre-final layer will be used for computing the prompt embeddings.
726
- callback_on_step_end (`Callable`, *optional*):
727
- A function that is called at the end of each denoising step during inference. The function is called
728
- with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
729
- callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
730
- `callback_on_step_end_tensor_inputs`.
731
- callback_on_step_end_tensor_inputs (`List`, *optional*):
732
- The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
733
- will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
734
- `._callback_tensor_inputs` attribute of your pipeline class.
735
- embedding_interpolation_type (`str`, *optional*, defaults to `"lerp"`):
736
- The type of interpolation to use for interpolating between text embeddings. Choose between `"lerp"` and `"slerp"`.
737
- latent_interpolation_type (`str`, *optional*, defaults to `"slerp"`):
738
- The type of interpolation to use for interpolating between latents. Choose between `"lerp"` and `"slerp"`.
739
- process_batch_size (`int`, *optional*, defaults to 4):
740
- The batch size to use for processing the images. This is useful when generating a large number of images
741
- and you want to avoid running out of memory.
742
-
743
- Examples:
744
-
745
- Returns:
746
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
747
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
748
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
749
- second element is a list of `bool`s indicating whether the corresponding generated image contains
750
- "not-safe-for-work" (nsfw) content.
751
- """
752
-
753
- callback = kwargs.pop("callback", None)
754
- callback_steps = kwargs.pop("callback_steps", None)
755
-
756
- if callback is not None:
757
- deprecate(
758
- "callback",
759
- "1.0.0",
760
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
761
- )
762
- if callback_steps is not None:
763
- deprecate(
764
- "callback_steps",
765
- "1.0.0",
766
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
767
- )
768
-
769
- # 0. Default height and width to unet
770
- height = height or self.unet.config.sample_size * self.vae_scale_factor
771
- width = width or self.unet.config.sample_size * self.vae_scale_factor
772
-
773
- # 1. Check inputs. Raise error if not correct
774
- self.check_inputs(prompt, height, width, callback_steps, prompt_embeds, callback_on_step_end_tensor_inputs)
775
- self._guidance_scale = guidance_scale
776
- self._clip_skip = clip_skip
777
- self._cross_attention_kwargs = cross_attention_kwargs
778
-
779
- # 2. Define call parameters
780
- if prompt is not None and isinstance(prompt, str):
781
- batch_size = 1
782
- elif prompt is not None and isinstance(prompt, list):
783
- batch_size = len(prompt)
784
- else:
785
- batch_size = prompt_embeds.shape[0]
786
- if batch_size < 2:
787
- raise ValueError(f"`prompt` must have length of at least 2 but found {batch_size}")
788
- if num_images_per_prompt != 1:
789
- raise ValueError("`num_images_per_prompt` must be `1` as no other value is supported yet")
790
- if prompt_embeds is not None:
791
- raise ValueError("`prompt_embeds` must be None since it is not supported yet")
792
- if latents is not None:
793
- raise ValueError("`latents` must be None since it is not supported yet")
794
-
795
- device = self._execution_device
796
- # do_classifier_free_guidance = guidance_scale > 1.0
797
-
798
- lora_scale = (
799
- self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
800
- )
801
-
802
- self.scheduler.set_timesteps(num_inference_steps, device, original_inference_steps=original_inference_steps)
803
- timesteps = self.scheduler.timesteps
804
- num_channels_latents = self.unet.config.in_channels
805
- # bs = batch_size * num_images_per_prompt
806
-
807
- # 3. Encode initial input prompt
808
- prompt_embeds_1, _ = self.encode_prompt(
809
- prompt[:1],
810
- device,
811
- num_images_per_prompt=num_images_per_prompt,
812
- do_classifier_free_guidance=False,
813
- negative_prompt=None,
814
- prompt_embeds=prompt_embeds,
815
- negative_prompt_embeds=None,
816
- lora_scale=lora_scale,
817
- clip_skip=self.clip_skip,
818
- )
819
-
820
- # 4. Prepare initial latent variables
821
- latents_1 = self.prepare_latents(
822
- 1,
823
- num_channels_latents,
824
- height,
825
- width,
826
- prompt_embeds_1.dtype,
827
- device,
828
- generator,
829
- latents,
830
- )
831
-
832
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None)
833
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
834
- self._num_timesteps = len(timesteps)
835
- images = []
836
-
837
- # 5. Iterate over prompts and perform latent walk. Note that we do this two prompts at a time
838
- # otherwise the memory usage ends up being too high.
839
- with self.progress_bar(total=batch_size - 1) as prompt_progress_bar:
840
- for i in range(1, batch_size):
841
- # 6. Encode current prompt
842
- prompt_embeds_2, _ = self.encode_prompt(
843
- prompt[i : i + 1],
844
- device,
845
- num_images_per_prompt=num_images_per_prompt,
846
- do_classifier_free_guidance=False,
847
- negative_prompt=None,
848
- prompt_embeds=prompt_embeds,
849
- negative_prompt_embeds=None,
850
- lora_scale=lora_scale,
851
- clip_skip=self.clip_skip,
852
- )
853
-
854
- # 7. Prepare current latent variables
855
- latents_2 = self.prepare_latents(
856
- 1,
857
- num_channels_latents,
858
- height,
859
- width,
860
- prompt_embeds_2.dtype,
861
- device,
862
- generator,
863
- latents,
864
- )
865
-
866
- # 8. Interpolate between previous and current prompt embeddings and latents
867
- inference_embeddings = self.interpolate_embedding(
868
- start_embedding=prompt_embeds_1,
869
- end_embedding=prompt_embeds_2,
870
- num_interpolation_steps=num_interpolation_steps,
871
- interpolation_type=embedding_interpolation_type,
872
- )
873
- inference_latents = self.interpolate_latent(
874
- start_latent=latents_1,
875
- end_latent=latents_2,
876
- num_interpolation_steps=num_interpolation_steps,
877
- interpolation_type=latent_interpolation_type,
878
- )
879
- next_prompt_embeds = inference_embeddings[-1:].detach().clone()
880
- next_latents = inference_latents[-1:].detach().clone()
881
- bs = num_interpolation_steps
882
-
883
- # 9. Perform inference in batches. Note the use of `process_batch_size` to control the batch size
884
- # of the inference. This is useful for reducing memory usage and can be configured based on the
885
- # available GPU memory.
886
- with self.progress_bar(
887
- total=(bs + process_batch_size - 1) // process_batch_size
888
- ) as batch_progress_bar:
889
- for batch_index in range(0, bs, process_batch_size):
890
- batch_inference_latents = inference_latents[batch_index : batch_index + process_batch_size]
891
- batch_inference_embeddings = inference_embeddings[
892
- batch_index : batch_index + process_batch_size
893
- ]
894
-
895
- self.scheduler.set_timesteps(
896
- num_inference_steps, device, original_inference_steps=original_inference_steps
897
- )
898
- timesteps = self.scheduler.timesteps
899
-
900
- current_bs = batch_inference_embeddings.shape[0]
901
- w = torch.tensor(self.guidance_scale - 1).repeat(current_bs)
902
- w_embedding = self.get_guidance_scale_embedding(
903
- w, embedding_dim=self.unet.config.time_cond_proj_dim
904
- ).to(device=device, dtype=latents_1.dtype)
905
-
906
- # 10. Perform inference for current batch
907
- with self.progress_bar(total=num_inference_steps) as progress_bar:
908
- for index, t in enumerate(timesteps):
909
- batch_inference_latents = batch_inference_latents.to(batch_inference_embeddings.dtype)
910
-
911
- # model prediction (v-prediction, eps, x)
912
- model_pred = self.unet(
913
- batch_inference_latents,
914
- t,
915
- timestep_cond=w_embedding,
916
- encoder_hidden_states=batch_inference_embeddings,
917
- cross_attention_kwargs=self.cross_attention_kwargs,
918
- return_dict=False,
919
- )[0]
920
-
921
- # compute the previous noisy sample x_t -> x_t-1
922
- batch_inference_latents, denoised = self.scheduler.step(
923
- model_pred, t, batch_inference_latents, **extra_step_kwargs, return_dict=False
924
- )
925
- if callback_on_step_end is not None:
926
- callback_kwargs = {}
927
- for k in callback_on_step_end_tensor_inputs:
928
- callback_kwargs[k] = locals()[k]
929
- callback_outputs = callback_on_step_end(self, index, t, callback_kwargs)
930
-
931
- batch_inference_latents = callback_outputs.pop("latents", batch_inference_latents)
932
- batch_inference_embeddings = callback_outputs.pop(
933
- "prompt_embeds", batch_inference_embeddings
934
- )
935
- w_embedding = callback_outputs.pop("w_embedding", w_embedding)
936
- denoised = callback_outputs.pop("denoised", denoised)
937
-
938
- # call the callback, if provided
939
- if index == len(timesteps) - 1 or (
940
- (index + 1) > num_warmup_steps and (index + 1) % self.scheduler.order == 0
941
- ):
942
- progress_bar.update()
943
- if callback is not None and index % callback_steps == 0:
944
- step_idx = index // getattr(self.scheduler, "order", 1)
945
- callback(step_idx, t, batch_inference_latents)
946
-
947
- denoised = denoised.to(batch_inference_embeddings.dtype)
948
-
949
- # Note: This is not supported because you would get black images in your latent walk if
950
- # NSFW concept is detected
951
- # if not output_type == "latent":
952
- # image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
953
- # image, has_nsfw_concept = self.run_safety_checker(image, device, inference_embeddings.dtype)
954
- # else:
955
- # image = denoised
956
- # has_nsfw_concept = None
957
-
958
- # if has_nsfw_concept is None:
959
- # do_denormalize = [True] * image.shape[0]
960
- # else:
961
- # do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
962
-
963
- image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
964
- do_denormalize = [True] * image.shape[0]
965
- has_nsfw_concept = None
966
-
967
- image = self.image_processor.postprocess(
968
- image, output_type=output_type, do_denormalize=do_denormalize
969
- )
970
- images.append(image)
971
-
972
- batch_progress_bar.update()
973
-
974
- prompt_embeds_1 = next_prompt_embeds
975
- latents_1 = next_latents
976
-
977
- prompt_progress_bar.update()
978
-
979
- # 11. Determine what should be returned
980
- if output_type == "pil":
981
- images = [image for image_list in images for image in image_list]
982
- elif output_type == "np":
983
- images = np.concatenate(images)
984
- elif output_type == "pt":
985
- images = torch.cat(images)
986
- else:
987
- raise ValueError("`output_type` must be one of 'pil', 'np' or 'pt'.")
988
-
989
- # Offload all models
990
- self.maybe_free_model_hooks()
991
-
992
- if not return_dict:
993
- return (images, has_nsfw_concept)
994
-
995
- return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
 
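For context, the file removed above (`latent_consistency_interpolate.py`) implemented a latent-consistency-model prompt interpolation ("latent walk"): it encodes consecutive prompts, interpolates the text embeddings (lerp or slerp) and the initial latents, and then runs the few-step LCM sampling loop in chunks. The snippet below is a minimal, hypothetical usage sketch and is not part of the diff; it assumes the community pipeline can still be loaded by name via `custom_pipeline` and that an LCM checkpoint such as `SimianLuo/LCM_Dreamshaper_v7` is available. The call arguments mirror the `__call__` signature shown above.

```py
# Hypothetical usage sketch (not part of the diff): latent walk between two prompts
# with the removed "latent_consistency_interpolate" community pipeline.
import torch
from diffusers import DiffusionPipeline

# Assumption: this LCM checkpoint and the custom pipeline name still resolve.
pipe = DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    custom_pipeline="latent_consistency_interpolate",
    torch_dtype=torch.float16,
).to("cuda")

prompts = [
    "a photo of a castle on a hill at sunrise",
    "a photo of a castle on a hill under a starry night sky",
]

images = pipe(
    prompt=prompts,                       # at least two prompts are required
    num_inference_steps=4,                # LCM only needs a few denoising steps
    num_interpolation_steps=8,            # frames generated between each prompt pair
    guidance_scale=8.5,
    embedding_interpolation_type="lerp",  # linear interpolation of text embeddings
    latent_interpolation_type="slerp",    # spherical interpolation of initial latents
    process_batch_size=4,                 # keeps peak memory bounded during the walk
).images
```

The `process_batch_size` argument exists because all interpolated frames between a prompt pair are denoised in chunks rather than in one batch, which is what the inner `batch_progress_bar` loop above implements.
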
latent_consistency_txt2img.py DELETED
@@ -1,729 +0,0 @@
1
- # Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
- # and https://github.com/hojonathanho/diffusion
17
-
18
- import math
19
- from dataclasses import dataclass
20
- from typing import Any, Dict, List, Optional, Tuple, Union
21
-
22
- import numpy as np
23
- import torch
24
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
25
-
26
- from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
27
- from diffusers.configuration_utils import register_to_config
28
- from diffusers.image_processor import VaeImageProcessor
29
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
30
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
31
- from diffusers.utils import BaseOutput
32
-
33
-
34
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
-
36
-
37
- class LatentConsistencyModelPipeline(DiffusionPipeline):
38
- _optional_components = ["scheduler"]
39
-
40
- def __init__(
41
- self,
42
- vae: AutoencoderKL,
43
- text_encoder: CLIPTextModel,
44
- tokenizer: CLIPTokenizer,
45
- unet: UNet2DConditionModel,
46
- scheduler: "LCMScheduler",
47
- safety_checker: StableDiffusionSafetyChecker,
48
- feature_extractor: CLIPImageProcessor,
49
- requires_safety_checker: bool = True,
50
- ):
51
- super().__init__()
52
-
53
- scheduler = (
54
- scheduler
55
- if scheduler is not None
56
- else LCMScheduler(
57
- beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
58
- )
59
- )
60
-
61
- self.register_modules(
62
- vae=vae,
63
- text_encoder=text_encoder,
64
- tokenizer=tokenizer,
65
- unet=unet,
66
- scheduler=scheduler,
67
- safety_checker=safety_checker,
68
- feature_extractor=feature_extractor,
69
- )
70
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
71
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
72
-
73
- def _encode_prompt(
74
- self,
75
- prompt,
76
- device,
77
- num_images_per_prompt,
78
- prompt_embeds: None,
79
- ):
80
- r"""
81
- Encodes the prompt into text encoder hidden states.
82
- Args:
83
- prompt (`str` or `List[str]`, *optional*):
84
- prompt to be encoded
85
- device: (`torch.device`):
86
- torch device
87
- num_images_per_prompt (`int`):
88
- number of images that should be generated per prompt
89
- prompt_embeds (`torch.Tensor`, *optional*):
90
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
91
- provided, text embeddings will be generated from `prompt` input argument.
92
- """
93
-
94
- if prompt is not None and isinstance(prompt, str):
95
- pass
96
- elif prompt is not None and isinstance(prompt, list):
97
- len(prompt)
98
- else:
99
- prompt_embeds.shape[0]
100
-
101
- if prompt_embeds is None:
102
- text_inputs = self.tokenizer(
103
- prompt,
104
- padding="max_length",
105
- max_length=self.tokenizer.model_max_length,
106
- truncation=True,
107
- return_tensors="pt",
108
- )
109
- text_input_ids = text_inputs.input_ids
110
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
111
-
112
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
113
- text_input_ids, untruncated_ids
114
- ):
115
- removed_text = self.tokenizer.batch_decode(
116
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
117
- )
118
- logger.warning(
119
- "The following part of your input was truncated because CLIP can only handle sequences up to"
120
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
121
- )
122
-
123
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
124
- attention_mask = text_inputs.attention_mask.to(device)
125
- else:
126
- attention_mask = None
127
-
128
- prompt_embeds = self.text_encoder(
129
- text_input_ids.to(device),
130
- attention_mask=attention_mask,
131
- )
132
- prompt_embeds = prompt_embeds[0]
133
-
134
- if self.text_encoder is not None:
135
- prompt_embeds_dtype = self.text_encoder.dtype
136
- elif self.unet is not None:
137
- prompt_embeds_dtype = self.unet.dtype
138
- else:
139
- prompt_embeds_dtype = prompt_embeds.dtype
140
-
141
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
142
-
143
- bs_embed, seq_len, _ = prompt_embeds.shape
144
- # duplicate text embeddings for each generation per prompt, using mps friendly method
145
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
146
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
147
-
148
- # Don't need to get uncond prompt embedding because of LCM Guided Distillation
149
- return prompt_embeds
150
-
151
- def run_safety_checker(self, image, device, dtype):
152
- if self.safety_checker is None:
153
- has_nsfw_concept = None
154
- else:
155
- if torch.is_tensor(image):
156
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
157
- else:
158
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
159
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
160
- image, has_nsfw_concept = self.safety_checker(
161
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
162
- )
163
- return image, has_nsfw_concept
164
-
165
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents=None):
166
- shape = (
167
- batch_size,
168
- num_channels_latents,
169
- int(height) // self.vae_scale_factor,
170
- int(width) // self.vae_scale_factor,
171
- )
172
- if latents is None:
173
- latents = torch.randn(shape, dtype=dtype).to(device)
174
- else:
175
- latents = latents.to(device)
176
- # scale the initial noise by the standard deviation required by the scheduler
177
- latents = latents * self.scheduler.init_noise_sigma
178
- return latents
179
-
180
- def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
181
- """
182
- see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
183
- Args:
184
- w: torch.Tensor: guidance scale values for which to generate embedding vectors
185
- embedding_dim: int: dimension of the embeddings to generate
186
- dtype: data type of the generated embeddings
187
- Returns:
188
- embedding vectors with shape `(len(w), embedding_dim)`
189
- """
190
- assert len(w.shape) == 1
191
- w = w * 1000.0
192
-
193
- half_dim = embedding_dim // 2
194
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
195
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
196
- emb = w.to(dtype)[:, None] * emb[None, :]
197
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
198
- if embedding_dim % 2 == 1: # zero pad
199
- emb = torch.nn.functional.pad(emb, (0, 1))
200
- assert emb.shape == (w.shape[0], embedding_dim)
201
- return emb
202
-
203
- @torch.no_grad()
204
- def __call__(
205
- self,
206
- prompt: Union[str, List[str]] = None,
207
- height: Optional[int] = 768,
208
- width: Optional[int] = 768,
209
- guidance_scale: float = 7.5,
210
- num_images_per_prompt: Optional[int] = 1,
211
- latents: Optional[torch.Tensor] = None,
212
- num_inference_steps: int = 4,
213
- lcm_origin_steps: int = 50,
214
- prompt_embeds: Optional[torch.Tensor] = None,
215
- output_type: Optional[str] = "pil",
216
- return_dict: bool = True,
217
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
218
- ):
219
- # 0. Default height and width to unet
220
- height = height or self.unet.config.sample_size * self.vae_scale_factor
221
- width = width or self.unet.config.sample_size * self.vae_scale_factor
222
-
223
- # 2. Define call parameters
224
- if prompt is not None and isinstance(prompt, str):
225
- batch_size = 1
226
- elif prompt is not None and isinstance(prompt, list):
227
- batch_size = len(prompt)
228
- else:
229
- batch_size = prompt_embeds.shape[0]
230
-
231
- device = self._execution_device
232
- # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
233
-
234
- # 3. Encode input prompt
235
- prompt_embeds = self._encode_prompt(
236
- prompt,
237
- device,
238
- num_images_per_prompt,
239
- prompt_embeds=prompt_embeds,
240
- )
241
-
242
- # 4. Prepare timesteps
243
- self.scheduler.set_timesteps(num_inference_steps, lcm_origin_steps)
244
- timesteps = self.scheduler.timesteps
245
-
246
- # 5. Prepare latent variable
247
- num_channels_latents = self.unet.config.in_channels
248
- latents = self.prepare_latents(
249
- batch_size * num_images_per_prompt,
250
- num_channels_latents,
251
- height,
252
- width,
253
- prompt_embeds.dtype,
254
- device,
255
- latents,
256
- )
257
- bs = batch_size * num_images_per_prompt
258
-
259
- # 6. Get Guidance Scale Embedding
260
- w = torch.tensor(guidance_scale).repeat(bs)
261
- w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
262
-
263
- # 7. LCM MultiStep Sampling Loop:
264
- with self.progress_bar(total=num_inference_steps) as progress_bar:
265
- for i, t in enumerate(timesteps):
266
- ts = torch.full((bs,), t, device=device, dtype=torch.long)
267
- latents = latents.to(prompt_embeds.dtype)
268
-
269
- # model prediction (v-prediction, eps, x)
270
- model_pred = self.unet(
271
- latents,
272
- ts,
273
- timestep_cond=w_embedding,
274
- encoder_hidden_states=prompt_embeds,
275
- cross_attention_kwargs=cross_attention_kwargs,
276
- return_dict=False,
277
- )[0]
278
-
279
- # compute the previous noisy sample x_t -> x_t-1
280
- latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
281
-
282
- # # call the callback, if provided
283
- # if i == len(timesteps) - 1:
284
- progress_bar.update()
285
-
286
- denoised = denoised.to(prompt_embeds.dtype)
287
- if not output_type == "latent":
288
- image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
289
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
290
- else:
291
- image = denoised
292
- has_nsfw_concept = None
293
-
294
- if has_nsfw_concept is None:
295
- do_denormalize = [True] * image.shape[0]
296
- else:
297
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
298
-
299
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
300
-
301
- if not return_dict:
302
- return (image, has_nsfw_concept)
303
-
304
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
305
-
306
-
307
- @dataclass
308
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
309
- class LCMSchedulerOutput(BaseOutput):
310
- """
311
- Output class for the scheduler's `step` function output.
312
- Args:
313
- prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
314
- Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
315
- denoising loop.
316
- denoised (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
317
- The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
318
- `denoised` can be used to preview progress or for guidance.
319
- """
320
-
321
- prev_sample: torch.Tensor
322
- denoised: Optional[torch.Tensor] = None
323
-
324
-
325
- # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
326
- def betas_for_alpha_bar(
327
- num_diffusion_timesteps,
328
- max_beta=0.999,
329
- alpha_transform_type="cosine",
330
- ):
331
- """
332
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
333
- (1-beta) over time from t = [0,1].
334
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
335
- to that part of the diffusion process.
336
- Args:
337
- num_diffusion_timesteps (`int`): the number of betas to produce.
338
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
339
- prevent singularities.
340
- alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
341
- Choose from `cosine` or `exp`
342
- Returns:
343
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
344
- """
345
- if alpha_transform_type == "cosine":
346
-
347
- def alpha_bar_fn(t):
348
- return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
349
-
350
- elif alpha_transform_type == "exp":
351
-
352
- def alpha_bar_fn(t):
353
- return math.exp(t * -12.0)
354
-
355
- else:
356
- raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
357
-
358
- betas = []
359
- for i in range(num_diffusion_timesteps):
360
- t1 = i / num_diffusion_timesteps
361
- t2 = (i + 1) / num_diffusion_timesteps
362
- betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
363
- return torch.tensor(betas, dtype=torch.float32)
364
-
365
-
366
- def rescale_zero_terminal_snr(betas):
367
- """
368
- Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
369
- Args:
370
- betas (`torch.Tensor`):
371
- the betas that the scheduler is being initialized with.
372
- Returns:
373
- `torch.Tensor`: rescaled betas with zero terminal SNR
374
- """
375
- # Convert betas to alphas_bar_sqrt
376
- alphas = 1.0 - betas
377
- alphas_cumprod = torch.cumprod(alphas, dim=0)
378
- alphas_bar_sqrt = alphas_cumprod.sqrt()
379
-
380
- # Store old values.
381
- alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
382
- alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
383
-
384
- # Shift so the last timestep is zero.
385
- alphas_bar_sqrt -= alphas_bar_sqrt_T
386
-
387
- # Scale so the first timestep is back to the old value.
388
- alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
389
-
390
- # Convert alphas_bar_sqrt to betas
391
- alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
392
- alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
393
- alphas = torch.cat([alphas_bar[0:1], alphas])
394
- betas = 1 - alphas
395
-
396
- return betas
397
-
398
-
399
- class LCMScheduler(SchedulerMixin, ConfigMixin):
400
- """
401
- `LCMScheduler` implements the multi-step consistency sampling procedure used by latent consistency models,
402
- building on a DDIM-style formulation of the reverse diffusion process.
403
- This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
404
- methods the library implements for all schedulers such as loading and saving.
405
- Args:
406
- num_train_timesteps (`int`, defaults to 1000):
407
- The number of diffusion steps to train the model.
408
- beta_start (`float`, defaults to 0.0001):
409
- The starting `beta` value of inference.
410
- beta_end (`float`, defaults to 0.02):
411
- The final `beta` value.
412
- beta_schedule (`str`, defaults to `"linear"`):
413
- The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
414
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
415
- trained_betas (`np.ndarray`, *optional*):
416
- Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
417
- clip_sample (`bool`, defaults to `True`):
418
- Clip the predicted sample for numerical stability.
419
- clip_sample_range (`float`, defaults to 1.0):
420
- The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
421
- set_alpha_to_one (`bool`, defaults to `True`):
422
- Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
423
- there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
424
- otherwise it uses the alpha value at step 0.
425
- steps_offset (`int`, defaults to 0):
426
- An offset added to the inference steps, as required by some model families.
427
- prediction_type (`str`, defaults to `epsilon`, *optional*):
428
- Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
429
- `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
430
- Video](https://imagen.research.google/video/paper.pdf) paper).
431
- thresholding (`bool`, defaults to `False`):
432
- Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
433
- as Stable Diffusion.
434
- dynamic_thresholding_ratio (`float`, defaults to 0.995):
435
- The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
436
- sample_max_value (`float`, defaults to 1.0):
437
- The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
438
- timestep_spacing (`str`, defaults to `"leading"`):
439
- The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
440
- Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
441
- rescale_betas_zero_snr (`bool`, defaults to `False`):
442
- Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
443
- dark samples instead of limiting it to samples with medium brightness. Loosely related to
444
- [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
445
- """
446
-
447
- # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
448
- order = 1
449
-
450
- @register_to_config
451
- def __init__(
452
- self,
453
- num_train_timesteps: int = 1000,
454
- beta_start: float = 0.0001,
455
- beta_end: float = 0.02,
456
- beta_schedule: str = "linear",
457
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
458
- clip_sample: bool = True,
459
- set_alpha_to_one: bool = True,
460
- steps_offset: int = 0,
461
- prediction_type: str = "epsilon",
462
- thresholding: bool = False,
463
- dynamic_thresholding_ratio: float = 0.995,
464
- clip_sample_range: float = 1.0,
465
- sample_max_value: float = 1.0,
466
- timestep_spacing: str = "leading",
467
- rescale_betas_zero_snr: bool = False,
468
- ):
469
- if trained_betas is not None:
470
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
471
- elif beta_schedule == "linear":
472
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
473
- elif beta_schedule == "scaled_linear":
474
- # this schedule is very specific to the latent diffusion model.
475
- self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
476
- elif beta_schedule == "squaredcos_cap_v2":
477
- # Glide cosine schedule
478
- self.betas = betas_for_alpha_bar(num_train_timesteps)
479
- else:
480
- raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
481
-
482
- # Rescale for zero SNR
483
- if rescale_betas_zero_snr:
484
- self.betas = rescale_zero_terminal_snr(self.betas)
485
-
486
- self.alphas = 1.0 - self.betas
487
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
488
-
489
- # At every step in ddim, we are looking into the previous alphas_cumprod
490
- # For the final step, there is no previous alphas_cumprod because we are already at 0
491
- # `set_alpha_to_one` decides whether we set this parameter simply to one or
492
- # whether we use the final alpha of the "non-previous" one.
493
- self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
494
-
495
- # standard deviation of the initial noise distribution
496
- self.init_noise_sigma = 1.0
497
-
498
- # setable values
499
- self.num_inference_steps = None
500
- self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
501
-
502
- def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
503
- """
504
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
505
- current timestep.
506
- Args:
507
- sample (`torch.Tensor`):
508
- The input sample.
509
- timestep (`int`, *optional*):
510
- The current timestep in the diffusion chain.
511
- Returns:
512
- `torch.Tensor`:
513
- A scaled input sample.
514
- """
515
- return sample
516
-
517
- def _get_variance(self, timestep, prev_timestep):
518
- alpha_prod_t = self.alphas_cumprod[timestep]
519
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
520
- beta_prod_t = 1 - alpha_prod_t
521
- beta_prod_t_prev = 1 - alpha_prod_t_prev
522
-
523
- variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
524
-
525
- return variance
526
-
527
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
528
- def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:
529
- """
530
- "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
531
- prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
532
- s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
533
- pixels from saturation at each step. We find that dynamic thresholding results in significantly better
534
- photorealism as well as better image-text alignment, especially when using very large guidance weights."
535
- https://arxiv.org/abs/2205.11487
536
- """
537
- dtype = sample.dtype
538
- batch_size, channels, height, width = sample.shape
539
-
540
- if dtype not in (torch.float32, torch.float64):
541
- sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
542
-
543
- # Flatten sample for doing quantile calculation along each image
544
- sample = sample.reshape(batch_size, channels * height * width)
545
-
546
- abs_sample = sample.abs() # "a certain percentile absolute pixel value"
547
-
548
- s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
549
- s = torch.clamp(
550
- s, min=1, max=self.config.sample_max_value
551
- ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
552
-
553
- s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
554
- sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
555
-
556
- sample = sample.reshape(batch_size, channels, height, width)
557
- sample = sample.to(dtype)
558
-
559
- return sample
560
-
561
- def set_timesteps(self, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None):
562
- """
563
- Sets the discrete timesteps used for the diffusion chain (to be run before inference).
564
- Args:
565
- num_inference_steps (`int`):
566
- The number of diffusion steps used when generating samples with a pre-trained model.
567
- """
568
-
569
- if num_inference_steps > self.config.num_train_timesteps:
570
- raise ValueError(
571
- f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:"
572
- f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
573
- f" maximal {self.config.num_train_timesteps} timesteps."
574
- )
575
-
576
- self.num_inference_steps = num_inference_steps
577
-
578
- # LCM Timesteps Setting: # Linear Spacing
579
- c = self.config.num_train_timesteps // lcm_origin_steps
580
- lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1 # LCM Training Steps Schedule
581
- skipping_step = len(lcm_origin_timesteps) // num_inference_steps
582
- timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
583
-
584
- self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
585
-
586
- def get_scalings_for_boundary_condition_discrete(self, t):
587
- self.sigma_data = 0.5 # Default: 0.5
588
-
589
- # By dividing 0.1: This is almost a delta function at t=0.
590
- c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
591
- c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
592
- return c_skip, c_out
593
-
594
- def step(
595
- self,
596
- model_output: torch.Tensor,
597
- timeindex: int,
598
- timestep: int,
599
- sample: torch.Tensor,
600
- eta: float = 0.0,
601
- use_clipped_model_output: bool = False,
602
- generator=None,
603
- variance_noise: Optional[torch.Tensor] = None,
604
- return_dict: bool = True,
605
- ) -> Union[LCMSchedulerOutput, Tuple]:
606
- """
607
- Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
608
- process from the learned model outputs (most often the predicted noise).
609
- Args:
610
- model_output (`torch.Tensor`):
611
- The direct output from learned diffusion model.
612
- timestep (`float`):
613
- The current discrete timestep in the diffusion chain.
614
- sample (`torch.Tensor`):
615
- A current instance of a sample created by the diffusion process.
616
- eta (`float`):
617
- The weight of noise for added noise in diffusion step.
618
- use_clipped_model_output (`bool`, defaults to `False`):
619
- If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
620
- because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
621
- clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
622
- `use_clipped_model_output` has no effect.
623
- generator (`torch.Generator`, *optional*):
624
- A random number generator.
625
- variance_noise (`torch.Tensor`):
626
- Alternative to generating noise with `generator` by directly providing the noise for the variance
627
- itself. Useful for methods such as [`CycleDiffusion`].
628
- return_dict (`bool`, *optional*, defaults to `True`):
629
- Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
630
- Returns:
631
- [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`:
632
- If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
633
- tuple is returned where the first element is the sample tensor.
634
- """
635
- if self.num_inference_steps is None:
636
- raise ValueError(
637
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
638
- )
639
-
640
- # 1. get previous step value
641
- prev_timeindex = timeindex + 1
642
- if prev_timeindex < len(self.timesteps):
643
- prev_timestep = self.timesteps[prev_timeindex]
644
- else:
645
- prev_timestep = timestep
646
-
647
- # 2. compute alphas, betas
648
- alpha_prod_t = self.alphas_cumprod[timestep]
649
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
650
-
651
- beta_prod_t = 1 - alpha_prod_t
652
- beta_prod_t_prev = 1 - alpha_prod_t_prev
653
-
654
- # 3. Get scalings for boundary conditions
655
- c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
656
-
657
- # 4. Different Parameterization:
658
- parameterization = self.config.prediction_type
659
-
660
- if parameterization == "epsilon": # noise-prediction
661
- pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
662
-
663
- elif parameterization == "sample": # x-prediction
664
- pred_x0 = model_output
665
-
666
- elif parameterization == "v_prediction": # v-prediction
667
- pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
668
-
669
- # 4. Denoise model output using boundary conditions
670
- denoised = c_out * pred_x0 + c_skip * sample
671
-
672
- # 5. Sample z ~ N(0, I), For MultiStep Inference
673
- # Noise is not used for one-step sampling.
674
- if len(self.timesteps) > 1:
675
- noise = torch.randn(model_output.shape).to(model_output.device)
676
- prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
677
- else:
678
- prev_sample = denoised
679
-
680
- if not return_dict:
681
- return (prev_sample, denoised)
682
-
683
- return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
684
-
685
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
686
- def add_noise(
687
- self,
688
- original_samples: torch.Tensor,
689
- noise: torch.Tensor,
690
- timesteps: torch.IntTensor,
691
- ) -> torch.Tensor:
692
- # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
693
- alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
694
- timesteps = timesteps.to(original_samples.device)
695
-
696
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
697
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
698
- while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
699
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
700
-
701
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
702
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
703
- while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
704
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
705
-
706
- noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
707
- return noisy_samples
708
-
709
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
710
- def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor:
711
- # Make sure alphas_cumprod and timestep have same device and dtype as sample
712
- alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
713
- timesteps = timesteps.to(sample.device)
714
-
715
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
716
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
717
- while len(sqrt_alpha_prod.shape) < len(sample.shape):
718
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
719
-
720
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
721
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
722
- while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
723
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
724
-
725
- velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
726
- return velocity
727
-
728
- def __len__(self):
729
- return self.config.num_train_timesteps
 
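The file removed above (`latent_consistency_txt2img.py`) bundled both the text-to-image pipeline and its `LCMScheduler`, whose `step()` blends the predicted `x_0` with the current sample through the boundary-condition scalings `c_skip` and `c_out` before re-noising for the next timestep. Below is a minimal, hypothetical usage sketch, not part of the diff; it assumes the custom pipeline name and the `SimianLuo/LCM_Dreamshaper_v7` checkpoint still resolve, and the call arguments mirror the `__call__` signature shown above.

```py
# Hypothetical usage sketch (not part of the diff): few-step text-to-image sampling
# with the removed "latent_consistency_txt2img" community pipeline.
import torch
from diffusers import DiffusionPipeline

# Assumption: this LCM checkpoint and the custom pipeline name still resolve.
pipe = DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    custom_pipeline="latent_consistency_txt2img",
    torch_dtype=torch.float32,
).to("cuda")

image = pipe(
    prompt="a photo of an astronaut riding a horse on the moon",
    width=768,
    height=768,
    num_inference_steps=4,   # LCM multi-step sampling loop shown above
    lcm_origin_steps=50,     # training schedule from which the 4 steps are skipped
    guidance_scale=7.5,      # folded into the w-embedding; no negative prompt is used
    output_type="pil",
).images[0]
```

Because guidance is distilled into the model through the `w_embedding`, the pipeline never builds an unconditional branch, which is why `_encode_prompt` above returns only the conditional text embeddings.
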
llm_grounded_diffusion.py DELETED
@@ -1,1558 +0,0 @@
1
- # Copyright 2024 Long Lian, the GLIGEN Authors, and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # This is a single file implementation of LMD+. See README.md for examples.
16
-
17
- import ast
18
- import gc
19
- import inspect
20
- import math
21
- import warnings
22
- from collections.abc import Iterable
23
- from typing import Any, Callable, Dict, List, Optional, Union
24
-
25
- import torch
26
- import torch.nn.functional as F
27
- from packaging import version
28
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
29
-
30
- from diffusers.configuration_utils import FrozenDict
31
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
32
- from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
33
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
34
- from diffusers.models.attention import Attention, GatedSelfAttentionDense
35
- from diffusers.models.attention_processor import AttnProcessor2_0
36
- from diffusers.models.lora import adjust_lora_scale_text_encoder
37
- from diffusers.pipelines import DiffusionPipeline
38
- from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
39
- from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
40
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
41
- from diffusers.schedulers import KarrasDiffusionSchedulers
42
- from diffusers.utils import (
43
- USE_PEFT_BACKEND,
44
- deprecate,
45
- logging,
46
- replace_example_docstring,
47
- scale_lora_layers,
48
- unscale_lora_layers,
49
- )
50
- from diffusers.utils.torch_utils import randn_tensor
51
-
52
-
53
- EXAMPLE_DOC_STRING = """
54
- Examples:
55
- ```py
56
- >>> import torch
57
- >>> from diffusers import DiffusionPipeline
58
-
59
- >>> pipe = DiffusionPipeline.from_pretrained(
60
- ... "longlian/lmd_plus",
61
- ... custom_pipeline="llm_grounded_diffusion",
62
- ... custom_revision="main",
63
- ... variant="fp16", torch_dtype=torch.float16
64
- ... )
65
- >>> pipe.enable_model_cpu_offload()
66
-
67
- >>> # Generate an image described by the prompt and
68
- >>> # insert objects described by text at the region defined by bounding boxes
69
- >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
70
- >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]]
71
- >>> phrases = ["a waterfall", "a modern high speed train"]
72
-
73
- >>> images = pipe(
74
- ... prompt=prompt,
75
- ... phrases=phrases,
76
- ... boxes=boxes,
77
- ... gligen_scheduled_sampling_beta=0.4,
78
- ... output_type="pil",
79
- ... num_inference_steps=50,
80
- ... lmd_guidance_kwargs={}
81
- ... ).images
82
-
83
- >>> images[0].save("./lmd_plus_generation.jpg")
84
-
85
- >>> # Generate directly from a text prompt and an LLM response
86
- >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
87
- >>> phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(\"""
88
- [('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])]
89
- Background prompt: A beautiful forest with fall foliage
90
- Negative prompt:
91
- \""")
92
-
93
- >>> images = pipe(
94
- ... prompt=prompt,
95
- ... negative_prompt=neg_prompt,
96
- ... phrases=phrases,
97
- ... boxes=boxes,
98
- ... gligen_scheduled_sampling_beta=0.4,
99
- ... output_type="pil",
100
- ... num_inference_steps=50,
101
- ... lmd_guidance_kwargs={}
102
- ... ).images
103
-
104
- >>> images[0].save("./lmd_plus_generation.jpg")
105
-
106
- images[0]
107
-
108
- ```
109
- """
110
-
111
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
112
-
113
- # All keys in Stable Diffusion models: [('down', 0, 0, 0), ('down', 0, 1, 0), ('down', 1, 0, 0), ('down', 1, 1, 0), ('down', 2, 0, 0), ('down', 2, 1, 0), ('mid', 0, 0, 0), ('up', 1, 0, 0), ('up', 1, 1, 0), ('up', 1, 2, 0), ('up', 2, 0, 0), ('up', 2, 1, 0), ('up', 2, 2, 0), ('up', 3, 0, 0), ('up', 3, 1, 0), ('up', 3, 2, 0)]
114
- # Note that the first up block is `UpBlock2D` rather than `CrossAttnUpBlock2D` and does not have attention. The last index is always 0 in our case since we have one `BasicTransformerBlock` in each `Transformer2DModel`.
115
- DEFAULT_GUIDANCE_ATTN_KEYS = [
116
- ("mid", 0, 0, 0),
117
- ("up", 1, 0, 0),
118
- ("up", 1, 1, 0),
119
- ("up", 1, 2, 0),
120
- ]
121
-
122
-
123
- def convert_attn_keys(key):
124
- """Convert the attention key from tuple format to the torch state format"""
125
-
126
- if key[0] == "mid":
127
- assert key[1] == 0, f"mid block only has one block but the index is {key[1]}"
128
- return f"{key[0]}_block.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor"
129
-
130
- return f"{key[0]}_blocks.{key[1]}.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor"
131
-
132
-
133
- DEFAULT_GUIDANCE_ATTN_KEYS = [convert_attn_keys(key) for key in DEFAULT_GUIDANCE_ATTN_KEYS]
134
-
135
-
136
- def scale_proportion(obj_box, H, W):
137
- # Round box_w and box_h separately so box sizes are shift-invariant; otherwise a box's size may change when both of the coordinates being rounded end in ".5".
138
- x_min, y_min = round(obj_box[0] * W), round(obj_box[1] * H)
139
- box_w, box_h = round((obj_box[2] - obj_box[0]) * W), round((obj_box[3] - obj_box[1]) * H)
140
- x_max, y_max = x_min + box_w, y_min + box_h
141
-
142
- x_min, y_min = max(x_min, 0), max(y_min, 0)
143
- x_max, y_max = min(x_max, W), min(y_max, H)
144
-
145
- return x_min, y_min, x_max, y_max
146
-
147
-
148
- # Adapted from the parent class `AttnProcessor2_0`
149
- class AttnProcessorWithHook(AttnProcessor2_0):
150
- def __init__(
151
- self,
152
- attn_processor_key,
153
- hidden_size,
154
- cross_attention_dim,
155
- hook=None,
156
- fast_attn=True,
157
- enabled=True,
158
- ):
159
- super().__init__()
160
- self.attn_processor_key = attn_processor_key
161
- self.hidden_size = hidden_size
162
- self.cross_attention_dim = cross_attention_dim
163
- self.hook = hook
164
- self.fast_attn = fast_attn
165
- self.enabled = enabled
166
-
167
- def __call__(
168
- self,
169
- attn: Attention,
170
- hidden_states,
171
- encoder_hidden_states=None,
172
- attention_mask=None,
173
- temb=None,
174
- scale: float = 1.0,
175
- ):
176
- residual = hidden_states
177
-
178
- if attn.spatial_norm is not None:
179
- hidden_states = attn.spatial_norm(hidden_states, temb)
180
-
181
- input_ndim = hidden_states.ndim
182
-
183
- if input_ndim == 4:
184
- batch_size, channel, height, width = hidden_states.shape
185
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
186
-
187
- batch_size, sequence_length, _ = (
188
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
189
- )
190
-
191
- if attention_mask is not None:
192
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
193
-
194
- if attn.group_norm is not None:
195
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
196
-
197
- args = () if USE_PEFT_BACKEND else (scale,)
198
- query = attn.to_q(hidden_states, *args)
199
-
200
- if encoder_hidden_states is None:
201
- encoder_hidden_states = hidden_states
202
- elif attn.norm_cross:
203
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
204
-
205
- key = attn.to_k(encoder_hidden_states, *args)
206
- value = attn.to_v(encoder_hidden_states, *args)
207
-
208
- inner_dim = key.shape[-1]
209
- head_dim = inner_dim // attn.heads
210
-
211
- if (self.hook is not None and self.enabled) or not self.fast_attn:
212
- query_batch_dim = attn.head_to_batch_dim(query)
213
- key_batch_dim = attn.head_to_batch_dim(key)
214
- value_batch_dim = attn.head_to_batch_dim(value)
215
- attention_probs = attn.get_attention_scores(query_batch_dim, key_batch_dim, attention_mask)
216
-
217
- if self.hook is not None and self.enabled:
218
- # Call the hook with query, key, value, and attention maps
219
- self.hook(
220
- self.attn_processor_key,
221
- query_batch_dim,
222
- key_batch_dim,
223
- value_batch_dim,
224
- attention_probs,
225
- )
226
-
227
- if self.fast_attn:
228
- query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
229
-
230
- key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
231
- value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
232
-
233
- if attention_mask is not None:
234
- # scaled_dot_product_attention expects attention_mask shape to be
235
- # (batch, heads, source_length, target_length)
236
- attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
237
-
238
- # the output of sdp = (batch, num_heads, seq_len, head_dim)
239
- # TODO: add support for attn.scale when we move to Torch 2.1
240
- hidden_states = F.scaled_dot_product_attention(
241
- query,
242
- key,
243
- value,
244
- attn_mask=attention_mask,
245
- dropout_p=0.0,
246
- is_causal=False,
247
- )
248
- hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
249
- hidden_states = hidden_states.to(query.dtype)
250
- else:
251
- hidden_states = torch.bmm(attention_probs, value)
252
- hidden_states = attn.batch_to_head_dim(hidden_states)
253
-
254
- # linear proj
255
- hidden_states = attn.to_out[0](hidden_states, *args)
256
- # dropout
257
- hidden_states = attn.to_out[1](hidden_states)
258
-
259
- if input_ndim == 4:
260
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
261
-
262
- if attn.residual_connection:
263
- hidden_states = hidden_states + residual
264
-
265
- hidden_states = hidden_states / attn.rescale_output_factor
266
-
267
- return hidden_states
268
-
269
-
270
- class LLMGroundedDiffusionPipeline(
271
- DiffusionPipeline,
272
- StableDiffusionMixin,
273
- TextualInversionLoaderMixin,
274
- LoraLoaderMixin,
275
- IPAdapterMixin,
276
- FromSingleFileMixin,
277
- ):
278
- r"""
279
- Pipeline for layout-grounded text-to-image generation using LLM-grounded Diffusion (LMD+): https://arxiv.org/pdf/2305.13655.pdf.
280
-
281
- This model inherits from [`StableDiffusionPipeline`] and aims at implementing the pipeline with minimal modifications. Check the superclass documentation for the generic methods
282
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
283
-
284
- This is a simplified implementation that does not perform latent or attention transfer from single object generation to overall generation. The final image is generated directly with attention and adapters control.
285
-
286
- Args:
287
- vae ([`AutoencoderKL`]):
288
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
289
- text_encoder ([`~transformers.CLIPTextModel`]):
290
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
291
- tokenizer ([`~transformers.CLIPTokenizer`]):
292
- A `CLIPTokenizer` to tokenize text.
293
- unet ([`UNet2DConditionModel`]):
294
- A `UNet2DConditionModel` to denoise the encoded image latents.
295
- scheduler ([`SchedulerMixin`]):
296
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
297
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
298
- safety_checker ([`StableDiffusionSafetyChecker`]):
299
- Classification module that estimates whether generated images could be considered offensive or harmful.
300
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
301
- about a model's potential harms.
302
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
303
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
304
- requires_safety_checker (bool):
305
- Whether a safety checker is needed for this pipeline.
306
- """
307
-
308
- model_cpu_offload_seq = "text_encoder->unet->vae"
309
- _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
310
- _exclude_from_cpu_offload = ["safety_checker"]
311
- _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
312
-
313
- objects_text = "Objects: "
314
- bg_prompt_text = "Background prompt: "
315
- bg_prompt_text_no_trailing_space = bg_prompt_text.rstrip()
316
- neg_prompt_text = "Negative prompt: "
317
- neg_prompt_text_no_trailing_space = neg_prompt_text.rstrip()
318
-
319
- def __init__(
320
- self,
321
- vae: AutoencoderKL,
322
- text_encoder: CLIPTextModel,
323
- tokenizer: CLIPTokenizer,
324
- unet: UNet2DConditionModel,
325
- scheduler: KarrasDiffusionSchedulers,
326
- safety_checker: StableDiffusionSafetyChecker,
327
- feature_extractor: CLIPImageProcessor,
328
- image_encoder: CLIPVisionModelWithProjection = None,
329
- requires_safety_checker: bool = True,
330
- ):
331
- # This is copied from StableDiffusionPipeline, with hook initializations for LMD+.
332
- super().__init__()
333
-
334
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
335
- deprecation_message = (
336
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
337
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
338
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
339
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
340
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
341
- " file"
342
- )
343
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
344
- new_config = dict(scheduler.config)
345
- new_config["steps_offset"] = 1
346
- scheduler._internal_dict = FrozenDict(new_config)
347
-
348
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
349
- deprecation_message = (
350
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
351
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
352
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
353
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
354
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
355
- )
356
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
357
- new_config = dict(scheduler.config)
358
- new_config["clip_sample"] = False
359
- scheduler._internal_dict = FrozenDict(new_config)
360
-
361
- if safety_checker is None and requires_safety_checker:
362
- logger.warning(
363
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
364
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
365
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
366
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
367
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
368
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
369
- )
370
-
371
- if safety_checker is not None and feature_extractor is None:
372
- raise ValueError(
373
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
374
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
375
- )
376
-
377
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
378
- version.parse(unet.config._diffusers_version).base_version
379
- ) < version.parse("0.9.0.dev0")
380
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
381
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
382
- deprecation_message = (
383
- "The configuration file of the unet has set the default `sample_size` to smaller than"
384
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
385
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
386
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
387
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
388
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
389
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
390
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
391
- " the `unet/config.json` file"
392
- )
393
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
394
- new_config = dict(unet.config)
395
- new_config["sample_size"] = 64
396
- unet._internal_dict = FrozenDict(new_config)
397
-
398
- self.register_modules(
399
- vae=vae,
400
- text_encoder=text_encoder,
401
- tokenizer=tokenizer,
402
- unet=unet,
403
- scheduler=scheduler,
404
- safety_checker=safety_checker,
405
- feature_extractor=feature_extractor,
406
- image_encoder=image_encoder,
407
- )
408
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
409
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
410
- self.register_to_config(requires_safety_checker=requires_safety_checker)
411
-
412
- # Initialize the attention hooks for LLM-grounded Diffusion
413
- self.register_attn_hooks(unet)
414
- self._saved_attn = None
415
-
416
- def attn_hook(self, name, query, key, value, attention_probs):
417
- if name in DEFAULT_GUIDANCE_ATTN_KEYS:
418
- self._saved_attn[name] = attention_probs
419
-
420
- @classmethod
421
- def convert_box(cls, box, height, width):
422
- # box: x, y, w, h (in 512 format) -> x_min, y_min, x_max, y_max
423
- x_min, y_min = box[0] / width, box[1] / height
424
- w_box, h_box = box[2] / width, box[3] / height
425
-
426
- x_max, y_max = x_min + w_box, y_min + h_box
427
-
428
- return x_min, y_min, x_max, y_max
429
-
430
- @classmethod
431
- def _parse_response_with_negative(cls, text):
432
- if not text:
433
- raise ValueError("LLM response is empty")
434
-
435
- if cls.objects_text in text:
436
- text = text.split(cls.objects_text)[1]
437
-
438
- text_split = text.split(cls.bg_prompt_text_no_trailing_space)
439
- if len(text_split) == 2:
440
- gen_boxes, text_rem = text_split
441
- else:
442
- raise ValueError(f"LLM response is incomplete: {text}")
443
-
444
- text_split = text_rem.split(cls.neg_prompt_text_no_trailing_space)
445
-
446
- if len(text_split) == 2:
447
- bg_prompt, neg_prompt = text_split
448
- else:
449
- raise ValueError(f"LLM response is incomplete: {text}")
450
-
451
- try:
452
- gen_boxes = ast.literal_eval(gen_boxes)
453
- except SyntaxError as e:
454
- # Sometimes the response is in plain text
455
- if "No objects" in gen_boxes or gen_boxes.strip() == "":
456
- gen_boxes = []
457
- else:
458
- raise e
459
- bg_prompt = bg_prompt.strip()
460
- neg_prompt = neg_prompt.strip()
461
-
462
- # LLM may return "None" to mean no negative prompt provided.
463
- if neg_prompt == "None":
464
- neg_prompt = ""
465
-
466
- return gen_boxes, bg_prompt, neg_prompt
467
-
468
- @classmethod
469
- def parse_llm_response(cls, response, canvas_height=512, canvas_width=512):
470
- # Infer from spec
471
- gen_boxes, bg_prompt, neg_prompt = cls._parse_response_with_negative(text=response)
472
-
473
- gen_boxes = sorted(gen_boxes, key=lambda gen_box: gen_box[0])
474
-
475
- phrases = [name for name, _ in gen_boxes]
476
- boxes = [cls.convert_box(box, height=canvas_height, width=canvas_width) for _, box in gen_boxes]
477
-
478
- return phrases, boxes, bg_prompt, neg_prompt
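A minimal usage sketch of the parser (it is a classmethod, so no weights are needed; the response text is the one from the docstring example above):

# Sketch: turn an LLM layout response into phrases, normalized boxes, and prompts.
response = (
    "[('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])]\n"
    "Background prompt: A beautiful forest with fall foliage\n"
    "Negative prompt:\n"
)
phrases, boxes, bg_prompt, neg_prompt = LLMGroundedDiffusionPipeline.parse_llm_response(
    response, canvas_height=512, canvas_width=512
)
# phrases: ['a modern high speed train', 'a waterfall'] (sorted by phrase text)
# boxes: normalized [x_min, y_min, x_max, y_max]; the waterfall box becomes
#        (71/512, 105/512, 219/512, 363/512) ~= (0.1387, 0.2051, 0.4277, 0.7090)
# bg_prompt: 'A beautiful forest with fall foliage'; neg_prompt: ''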
479
-
480
- def check_inputs(
481
- self,
482
- prompt,
483
- height,
484
- width,
485
- callback_steps,
486
- phrases,
487
- boxes,
488
- negative_prompt=None,
489
- prompt_embeds=None,
490
- negative_prompt_embeds=None,
491
- phrase_indices=None,
492
- ):
493
- if height % 8 != 0 or width % 8 != 0:
494
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
495
-
496
- if (callback_steps is None) or (
497
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
498
- ):
499
- raise ValueError(
500
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
501
- f" {type(callback_steps)}."
502
- )
503
-
504
- if prompt is not None and prompt_embeds is not None:
505
- raise ValueError(
506
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
507
- " only forward one of the two."
508
- )
509
- elif prompt is None and prompt_embeds is None:
510
- raise ValueError(
511
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
512
- )
513
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
514
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
515
- elif prompt is None and phrase_indices is None:
516
- raise ValueError("If the prompt is None, the phrase_indices cannot be None")
517
-
518
- if negative_prompt is not None and negative_prompt_embeds is not None:
519
- raise ValueError(
520
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
521
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
522
- )
523
-
524
- if prompt_embeds is not None and negative_prompt_embeds is not None:
525
- if prompt_embeds.shape != negative_prompt_embeds.shape:
526
- raise ValueError(
527
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
528
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
529
- f" {negative_prompt_embeds.shape}."
530
- )
531
-
532
- if len(phrases) != len(boxes):
533
- raise ValueError(
534
- "length of `phrases` and `boxes` has to be same, but"
535
- f" got: `phrases` {len(phrases)} != `boxes` {len(boxes)}"
536
- )
537
-
538
- def register_attn_hooks(self, unet):
539
- """Registering hooks to obtain the attention maps for guidance"""
540
-
541
- attn_procs = {}
542
-
543
- for name in unet.attn_processors.keys():
544
- # Only obtain the queries and keys from cross-attention
545
- if name.endswith("attn1.processor") or name.endswith("fuser.attn.processor"):
546
- # Keep the same attn_processors for self-attention (no hooks for self-attention)
547
- attn_procs[name] = unet.attn_processors[name]
548
- continue
549
-
550
- cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
551
-
552
- if name.startswith("mid_block"):
553
- hidden_size = unet.config.block_out_channels[-1]
554
- elif name.startswith("up_blocks"):
555
- block_id = int(name[len("up_blocks.")])
556
- hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
557
- elif name.startswith("down_blocks"):
558
- block_id = int(name[len("down_blocks.")])
559
- hidden_size = unet.config.block_out_channels[block_id]
560
-
561
- attn_procs[name] = AttnProcessorWithHook(
562
- attn_processor_key=name,
563
- hidden_size=hidden_size,
564
- cross_attention_dim=cross_attention_dim,
565
- hook=self.attn_hook,
566
- fast_attn=True,
567
- # Not enabled by default
568
- enabled=False,
569
- )
570
-
571
- unet.set_attn_processor(attn_procs)
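As a quick sanity check (a sketch; `pipe` here is assumed to be an already constructed pipeline, as in the docstring example), only cross-attention processors receive the hook:

# Sketch: list the processors that were replaced with AttnProcessorWithHook.
hooked = [
    name
    for name, proc in pipe.unet.attn_processors.items()
    if isinstance(proc, AttnProcessorWithHook)
]
# Every entry ends with "attn2.processor"; self-attention ("attn1") and GLIGEN fuser
# processors keep their original processors and are never hooked.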
572
-
573
- def enable_fuser(self, enabled=True):
574
- for module in self.unet.modules():
575
- if isinstance(module, GatedSelfAttentionDense):
576
- module.enabled = enabled
577
-
578
- def enable_attn_hook(self, enabled=True):
579
- for module in self.unet.attn_processors.values():
580
- if isinstance(module, AttnProcessorWithHook):
581
- module.enabled = enabled
582
-
583
- def get_token_map(self, prompt, padding="do_not_pad", verbose=False):
584
- """Get a list of mapping: prompt index to str (prompt in a list of token str)"""
585
- fg_prompt_tokens = self.tokenizer([prompt], padding=padding, max_length=77, return_tensors="np")
586
- input_ids = fg_prompt_tokens["input_ids"][0]
587
-
588
- token_map = []
589
- for ind, item in enumerate(input_ids.tolist()):
590
- token = self.tokenizer._convert_id_to_token(item)
591
-
592
- if verbose:
593
- logger.info(f"{ind}, {token} ({item})")
594
-
595
- token_map.append(token)
596
-
597
- return token_map
598
-
599
- def get_phrase_indices(
600
- self,
601
- prompt,
602
- phrases,
603
- token_map=None,
604
- add_suffix_if_not_found=False,
605
- verbose=False,
606
- ):
607
- for obj in phrases:
608
- # Suffix the prompt with object name for attention guidance if object is not in the prompt, using "|" to separate the prompt and the suffix
609
- if obj not in prompt:
610
- prompt += "| " + obj
611
-
612
- if token_map is None:
613
- # We allow using a pre-computed token map.
614
- token_map = self.get_token_map(prompt=prompt, padding="do_not_pad", verbose=verbose)
615
- token_map_str = " ".join(token_map)
616
-
617
- phrase_indices = []
618
-
619
- for obj in phrases:
620
- phrase_token_map = self.get_token_map(prompt=obj, padding="do_not_pad", verbose=verbose)
621
- # Remove <bos> and <eos> in substr
622
- phrase_token_map = phrase_token_map[1:-1]
623
- phrase_token_map_len = len(phrase_token_map)
624
- phrase_token_map_str = " ".join(phrase_token_map)
625
-
626
- if verbose:
627
- logger.info(
- f"Full str: {token_map_str}, Substr: {phrase_token_map_str}, Phrase: {phrases}"
- )
635
-
636
- # Count the number of tokens before the substring.
- # The substring is preceded by a space, which is dropped by subtracting one from the index.
638
- obj_first_index = len(token_map_str[: token_map_str.index(phrase_token_map_str) - 1].split(" "))
639
-
640
- obj_position = list(range(obj_first_index, obj_first_index + phrase_token_map_len))
641
- phrase_indices.append(obj_position)
642
-
643
- if add_suffix_if_not_found:
644
- return phrase_indices, prompt
645
-
646
- return phrase_indices
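A hedged sketch of precomputing phrase indices before calling the pipeline (`pipe` is assumed to be a constructed pipeline; the concrete index values depend on the CLIP tokenizer, so they are not listed):

# Sketch: find which prompt token positions correspond to each grounded phrase.
prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
phrases = ["a waterfall", "a modern high speed train"]
phrase_indices, prompt = pipe.get_phrase_indices(prompt, phrases, add_suffix_if_not_found=True)
# phrase_indices holds one list of token positions per phrase; the prompt is returned
# unchanged here because both phrases already appear in it, so no "| phrase" suffix is added.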
647
-
648
- def add_ca_loss_per_attn_map_to_loss(
649
- self,
650
- loss,
651
- attn_map,
652
- object_number,
653
- bboxes,
654
- phrase_indices,
655
- fg_top_p=0.2,
656
- bg_top_p=0.2,
657
- fg_weight=1.0,
658
- bg_weight=1.0,
659
- ):
660
- # b is the number of heads, not batch
661
- b, i, j = attn_map.shape
662
- H = W = int(math.sqrt(i))
663
- for obj_idx in range(object_number):
664
- obj_loss = 0
665
- mask = torch.zeros(size=(H, W), device="cuda")
666
- obj_boxes = bboxes[obj_idx]
667
-
668
- # We support two-level inputs (one box per phrase) and three-level inputs (multiple boxes per phrase)
669
- if not isinstance(obj_boxes[0], Iterable):
670
- obj_boxes = [obj_boxes]
671
-
672
- for obj_box in obj_boxes:
673
- # x_min, y_min, x_max, y_max = int(obj_box[0] * W), int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H)
674
- x_min, y_min, x_max, y_max = scale_proportion(obj_box, H=H, W=W)
675
- mask[y_min:y_max, x_min:x_max] = 1
676
-
677
- for obj_position in phrase_indices[obj_idx]:
678
- # Could potentially optimize to compute this for loop in batch.
679
- # Could crop the ref cross attention before saving to save memory.
680
-
681
682
-
683
- # shape: (b, H * W)
684
- ca_map_obj = attn_map[:, :, obj_position] # .reshape(b, H, W)
685
- k_fg = (mask.sum() * fg_top_p).long().clamp_(min=1)
686
- k_bg = ((1 - mask).sum() * bg_top_p).long().clamp_(min=1)
687
-
688
- mask_1d = mask.view(1, -1)
689
-
690
- # Max-based loss function
691
-
692
- # Take the topk over spatial dimension, and then take the sum over heads dim
693
- # The mean is over k_fg and k_bg dimension, so we don't need to sum and divide on our own.
694
- obj_loss += (1 - (ca_map_obj * mask_1d).topk(k=k_fg).values.mean(dim=1)).sum(dim=0) * fg_weight
695
- obj_loss += ((ca_map_obj * (1 - mask_1d)).topk(k=k_bg).values.mean(dim=1)).sum(dim=0) * bg_weight
696
-
697
- loss += obj_loss / len(phrase_indices[obj_idx])
698
-
699
- return loss
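In equation form (a sketch that mirrors the code above rather than the paper's notation): for one attention map A with b heads, an object with binary box mask m over the H x W grid, phrase token positions P, weights w_fg and w_bg, and top-k sizes k_fg = max(1, floor(fg_top_p * |m|)) and k_bg = max(1, floor(bg_top_p * (HW - |m|))), the per-object contribution is

\mathcal{L}_{\mathrm{obj}} = \frac{1}{|P|} \sum_{p \in P} \sum_{h=1}^{b} \Big[ w_{\mathrm{fg}} \big(1 - \mathrm{mean}\,\mathrm{topk}_{k_{\mathrm{fg}}}(A_{h,:,p} \odot m)\big) + w_{\mathrm{bg}}\, \mathrm{mean}\,\mathrm{topk}_{k_{\mathrm{bg}}}\big(A_{h,:,p} \odot (1 - m)\big) \Big]

`compute_ca_loss` below then sums these contributions over objects and attention keys and divides by `object_number * len(guidance_attn_keys)`.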
700
-
701
- def compute_ca_loss(
702
- self,
703
- saved_attn,
704
- bboxes,
705
- phrase_indices,
706
- guidance_attn_keys,
707
- verbose=False,
708
- **kwargs,
709
- ):
710
- """
711
- The `saved_attn` is supposed to be passed to `save_attn_to_dict` in `cross_attention_kwargs` prior to computing the loss.
712
- `AttnProcessor` will put attention maps into the `save_attn_to_dict`.
713
-
714
- `index` is the timestep.
715
- `ref_ca_word_token_only`: This has precedence over `ref_ca_last_token_only` (i.e., if both are enabled, we take the token from word rather than the last token).
716
- `ref_ca_last_token_only`: `ref_ca_saved_attn` comes from the attention map of the last token of the phrase in single object generation, so we apply it only to the last token of the phrase in overall generation if this is set to True. If set to False, `ref_ca_saved_attn` will be applied to all the text tokens.
717
- """
718
- loss = torch.tensor(0).float().cuda()
719
- object_number = len(bboxes)
720
- if object_number == 0:
721
- return loss
722
-
723
- for attn_key in guidance_attn_keys:
724
- # We only have 1 cross attention for mid.
725
-
726
- attn_map_integrated = saved_attn[attn_key]
727
- if not attn_map_integrated.is_cuda:
728
- attn_map_integrated = attn_map_integrated.cuda()
729
- # Example dimension: [20, 64, 77]
730
- attn_map = attn_map_integrated.squeeze(dim=0)
731
-
732
- loss = self.add_ca_loss_per_attn_map_to_loss(
733
- loss, attn_map, object_number, bboxes, phrase_indices, **kwargs
734
- )
735
-
736
- num_attn = len(guidance_attn_keys)
737
-
738
- if num_attn > 0:
739
- loss = loss / (object_number * num_attn)
740
-
741
- return loss
742
-
743
- @torch.no_grad()
744
- @replace_example_docstring(EXAMPLE_DOC_STRING)
745
- def __call__(
746
- self,
747
- prompt: Union[str, List[str]] = None,
748
- height: Optional[int] = None,
749
- width: Optional[int] = None,
750
- num_inference_steps: int = 50,
751
- guidance_scale: float = 7.5,
752
- gligen_scheduled_sampling_beta: float = 0.3,
753
- phrases: List[str] = None,
754
- boxes: List[List[float]] = None,
755
- negative_prompt: Optional[Union[str, List[str]]] = None,
756
- num_images_per_prompt: Optional[int] = 1,
757
- eta: float = 0.0,
758
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
759
- latents: Optional[torch.Tensor] = None,
760
- prompt_embeds: Optional[torch.Tensor] = None,
761
- negative_prompt_embeds: Optional[torch.Tensor] = None,
762
- ip_adapter_image: Optional[PipelineImageInput] = None,
763
- output_type: Optional[str] = "pil",
764
- return_dict: bool = True,
765
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
766
- callback_steps: int = 1,
767
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
768
- clip_skip: Optional[int] = None,
769
- lmd_guidance_kwargs: Optional[Dict[str, Any]] = {},
770
- phrase_indices: Optional[List[int]] = None,
771
- ):
772
- r"""
773
- The call function to the pipeline for generation.
774
-
775
- Args:
776
- prompt (`str` or `List[str]`, *optional*):
777
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
778
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
779
- The height in pixels of the generated image.
780
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
781
- The width in pixels of the generated image.
782
- num_inference_steps (`int`, *optional*, defaults to 50):
783
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
784
- expense of slower inference.
785
- guidance_scale (`float`, *optional*, defaults to 7.5):
786
- A higher guidance scale value encourages the model to generate images closely linked to the text
787
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
788
- phrases (`List[str]`):
789
- The phrases to guide what to include in each of the regions defined by the corresponding
790
- `boxes`. There should only be one phrase per bounding box.
791
- boxes (`List[List[float]]`):
792
- The bounding boxes that identify rectangular regions of the image that are going to be filled with the
793
- content described by the corresponding `phrases`. Each rectangular box is defined as a
794
- `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1].
795
- gligen_scheduled_sampling_beta (`float`, defaults to 0.3):
796
- Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image
797
- Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for
798
- scheduled sampling during inference for improved quality and controllability.
799
- negative_prompt (`str` or `List[str]`, *optional*):
800
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
801
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
802
- num_images_per_prompt (`int`, *optional*, defaults to 1):
803
- The number of images to generate per prompt.
804
- eta (`float`, *optional*, defaults to 0.0):
805
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
806
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
807
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
808
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
809
- generation deterministic.
810
- latents (`torch.Tensor`, *optional*):
811
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
812
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
813
- tensor is generated by sampling using the supplied random `generator`.
814
- prompt_embeds (`torch.Tensor`, *optional*):
815
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
816
- provided, text embeddings are generated from the `prompt` input argument.
817
- negative_prompt_embeds (`torch.Tensor`, *optional*):
818
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
819
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
820
- ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
821
- output_type (`str`, *optional*, defaults to `"pil"`):
822
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
823
- return_dict (`bool`, *optional*, defaults to `True`):
824
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
825
- plain tuple.
826
- callback (`Callable`, *optional*):
827
- A function that calls every `callback_steps` steps during inference. The function is called with the
828
- following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
829
- callback_steps (`int`, *optional*, defaults to 1):
830
- The frequency at which the `callback` function is called. If not specified, the callback is called at
831
- every step.
832
- cross_attention_kwargs (`dict`, *optional*):
833
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
834
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
835
- guidance_rescale (`float`, *optional*, defaults to 0.0):
836
- Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
837
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
838
- using zero terminal SNR.
839
- clip_skip (`int`, *optional*):
840
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
841
- the output of the pre-final layer will be used for computing the prompt embeddings.
842
- lmd_guidance_kwargs (`dict`, *optional*):
843
- A kwargs dictionary that if specified is passed along to `latent_lmd_guidance` function. Useful keys include `loss_scale` (the guidance strength), `loss_threshold` (when loss is lower than this value, the guidance is not applied anymore), `max_iter` (the number of iterations of guidance for each step), and `guidance_timesteps` (the number of diffusion timesteps to apply guidance on). See `latent_lmd_guidance` for implementation details.
844
- phrase_indices (`list` of `list`, *optional*): The indices of the tokens of each phrase in the overall prompt. If omitted, the pipeline will match the first token subsequence. The pipeline will append the missing phrases to the end of the prompt by default.
845
- Examples:
846
-
847
- Returns:
848
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
849
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
850
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
851
- second element is a list of `bool`s indicating whether the corresponding generated image contains
852
- "not-safe-for-work" (nsfw) content.
853
- """
854
- # 0. Default height and width to unet
855
- height = height or self.unet.config.sample_size * self.vae_scale_factor
856
- width = width or self.unet.config.sample_size * self.vae_scale_factor
857
-
858
- # 1. Check inputs. Raise error if not correct
859
- self.check_inputs(
860
- prompt,
861
- height,
862
- width,
863
- callback_steps,
864
- phrases,
865
- boxes,
866
- negative_prompt,
867
- prompt_embeds,
868
- negative_prompt_embeds,
869
- phrase_indices,
870
- )
871
-
872
- # 2. Define call parameters
873
- if prompt is not None and isinstance(prompt, str):
874
- batch_size = 1
875
- if phrase_indices is None:
876
- phrase_indices, prompt = self.get_phrase_indices(prompt, phrases, add_suffix_if_not_found=True)
877
- elif prompt is not None and isinstance(prompt, list):
878
- batch_size = len(prompt)
879
- if phrase_indices is None:
880
- phrase_indices = []
881
- prompt_parsed = []
882
- for prompt_item in prompt:
883
- (
884
- phrase_indices_parsed_item,
885
- prompt_parsed_item,
886
- ) = self.get_phrase_indices(prompt_item, add_suffix_if_not_found=True)
887
- phrase_indices.append(phrase_indices_parsed_item)
888
- prompt_parsed.append(prompt_parsed_item)
889
- prompt = prompt_parsed
890
- else:
891
- batch_size = prompt_embeds.shape[0]
892
-
893
- device = self._execution_device
894
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
895
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
896
- # corresponds to doing no classifier free guidance.
897
- do_classifier_free_guidance = guidance_scale > 1.0
898
-
899
- # 3. Encode input prompt
900
- prompt_embeds, negative_prompt_embeds = self.encode_prompt(
901
- prompt,
902
- device,
903
- num_images_per_prompt,
904
- do_classifier_free_guidance,
905
- negative_prompt,
906
- prompt_embeds=prompt_embeds,
907
- negative_prompt_embeds=negative_prompt_embeds,
908
- clip_skip=clip_skip,
909
- )
910
-
911
- cond_prompt_embeds = prompt_embeds
912
-
913
- # For classifier free guidance, we need to do two forward passes.
914
- # Here we concatenate the unconditional and text embeddings into a single batch
915
- # to avoid doing two forward passes
916
- if do_classifier_free_guidance:
917
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
918
-
919
- if ip_adapter_image is not None:
920
- image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
921
- if self.do_classifier_free_guidance:
922
- image_embeds = torch.cat([negative_image_embeds, image_embeds])
923
-
924
- # 4. Prepare timesteps
925
- self.scheduler.set_timesteps(num_inference_steps, device=device)
926
- timesteps = self.scheduler.timesteps
927
-
928
- # 5. Prepare latent variables
929
- num_channels_latents = self.unet.config.in_channels
930
- latents = self.prepare_latents(
931
- batch_size * num_images_per_prompt,
932
- num_channels_latents,
933
- height,
934
- width,
935
- prompt_embeds.dtype,
936
- device,
937
- generator,
938
- latents,
939
- )
940
-
941
- # 5.1 Prepare GLIGEN variables
942
- max_objs = 30
943
- if len(boxes) > max_objs:
944
- warnings.warn(
945
- f"More that {max_objs} objects found. Only first {max_objs} objects will be processed.",
946
- FutureWarning,
947
- )
948
- phrases = phrases[:max_objs]
949
- boxes = boxes[:max_objs]
950
-
951
- n_objs = len(boxes)
952
- if n_objs:
953
- # prepare batched input to the PositionNet (boxes, phrases, mask)
954
- # Get tokens for phrases from pre-trained CLIPTokenizer
955
- tokenizer_inputs = self.tokenizer(phrases, padding=True, return_tensors="pt").to(device)
956
- # For the token, we use the same pre-trained text encoder
957
- # to obtain its text feature
958
- _text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output
959
-
960
- # Each entity described in `phrases` is denoted with a bounding box;
- # we represent the location information as (xmin, ymin, xmax, ymax)
962
- cond_boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype)
963
- if n_objs:
964
- cond_boxes[:n_objs] = torch.tensor(boxes)
965
- text_embeddings = torch.zeros(
966
- max_objs,
967
- self.unet.config.cross_attention_dim,
968
- device=device,
969
- dtype=self.text_encoder.dtype,
970
- )
971
- if n_objs:
972
- text_embeddings[:n_objs] = _text_embeddings
973
- # Generate a mask for each object, i.e., each entity described by `phrases`
974
- masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype)
975
- masks[:n_objs] = 1
976
-
977
- repeat_batch = batch_size * num_images_per_prompt
978
- cond_boxes = cond_boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
979
- text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
980
- masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone()
981
- if do_classifier_free_guidance:
982
- repeat_batch = repeat_batch * 2
983
- cond_boxes = torch.cat([cond_boxes] * 2)
984
- text_embeddings = torch.cat([text_embeddings] * 2)
985
- masks = torch.cat([masks] * 2)
986
- masks[: repeat_batch // 2] = 0
987
- if cross_attention_kwargs is None:
988
- cross_attention_kwargs = {}
989
- cross_attention_kwargs["gligen"] = {
990
- "boxes": cond_boxes,
991
- "positive_embeddings": text_embeddings,
992
- "masks": masks,
993
- }
994
-
995
- num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps))
996
- self.enable_fuser(True)
997
-
998
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
999
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1000
-
1001
- # 6.1 Add image embeds for IP-Adapter
1002
- added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
1003
-
1004
- loss_attn = torch.tensor(10000.0)
1005
-
1006
- # 7. Denoising loop
1007
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1008
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1009
- for i, t in enumerate(timesteps):
1010
- # Scheduled sampling
1011
- if i == num_grounding_steps:
1012
- self.enable_fuser(False)
1013
-
1014
- if latents.shape[1] != 4:
1015
- latents = torch.randn_like(latents[:, :4])
1016
-
1017
- # 7.1 Perform LMD guidance
1018
- if boxes:
1019
- latents, loss_attn = self.latent_lmd_guidance(
1020
- cond_prompt_embeds,
1021
- index=i,
1022
- boxes=boxes,
1023
- phrase_indices=phrase_indices,
1024
- t=t,
1025
- latents=latents,
1026
- loss=loss_attn,
1027
- **lmd_guidance_kwargs,
1028
- )
1029
-
1030
- # expand the latents if we are doing classifier free guidance
1031
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1032
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1033
-
1034
- # predict the noise residual
1035
- noise_pred = self.unet(
1036
- latent_model_input,
1037
- t,
1038
- encoder_hidden_states=prompt_embeds,
1039
- cross_attention_kwargs=cross_attention_kwargs,
1040
- added_cond_kwargs=added_cond_kwargs,
1041
- ).sample
1042
-
1043
- # perform guidance
1044
- if do_classifier_free_guidance:
1045
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1046
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1047
-
1048
- # compute the previous noisy sample x_t -> x_t-1
1049
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1050
-
1051
- # call the callback, if provided
1052
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1053
- progress_bar.update()
1054
- if callback is not None and i % callback_steps == 0:
1055
- step_idx = i // getattr(self.scheduler, "order", 1)
1056
- callback(step_idx, t, latents)
1057
-
1058
- if not output_type == "latent":
1059
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1060
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1061
- else:
1062
- image = latents
1063
- has_nsfw_concept = None
1064
-
1065
- if has_nsfw_concept is None:
1066
- do_denormalize = [True] * image.shape[0]
1067
- else:
1068
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1069
-
1070
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1071
-
1072
- # Offload last model to CPU
1073
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1074
- self.final_offload_hook.offload()
1075
-
1076
- if not return_dict:
1077
- return (image, has_nsfw_concept)
1078
-
1079
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
1080
-
1081
- @torch.set_grad_enabled(True)
1082
- def latent_lmd_guidance(
1083
- self,
1084
- cond_embeddings,
1085
- index,
1086
- boxes,
1087
- phrase_indices,
1088
- t,
1089
- latents,
1090
- loss,
1091
- *,
1092
- loss_scale=20,
1093
- loss_threshold=5.0,
1094
- max_iter=[3] * 5 + [2] * 5 + [1] * 5,
1095
- guidance_timesteps=15,
1096
- cross_attention_kwargs=None,
1097
- guidance_attn_keys=DEFAULT_GUIDANCE_ATTN_KEYS,
1098
- verbose=False,
1099
- clear_cache=False,
1100
- unet_additional_kwargs={},
1101
- guidance_callback=None,
1102
- **kwargs,
1103
- ):
1104
- scheduler, unet = self.scheduler, self.unet
1105
-
1106
- iteration = 0
1107
-
1108
- if index < guidance_timesteps:
1109
- if isinstance(max_iter, list):
1110
- max_iter = max_iter[index]
1111
-
1112
- if verbose:
1113
- logger.info(
1114
- f"time index {index}, loss: {loss.item()/loss_scale:.3f} (de-scaled with scale {loss_scale:.1f}), loss threshold: {loss_threshold:.3f}"
1115
- )
1116
-
1117
- try:
1118
- self.enable_attn_hook(enabled=True)
1119
-
1120
- while (
1121
- loss.item() / loss_scale > loss_threshold and iteration < max_iter and index < guidance_timesteps
1122
- ):
1123
- self._saved_attn = {}
1124
-
1125
- latents.requires_grad_(True)
1126
- latent_model_input = latents
1127
- latent_model_input = scheduler.scale_model_input(latent_model_input, t)
1128
-
1129
- unet(
1130
- latent_model_input,
1131
- t,
1132
- encoder_hidden_states=cond_embeddings,
1133
- cross_attention_kwargs=cross_attention_kwargs,
1134
- **unet_additional_kwargs,
1135
- )
1136
-
1137
- # update latents with guidance
1138
- loss = (
1139
- self.compute_ca_loss(
1140
- saved_attn=self._saved_attn,
1141
- bboxes=boxes,
1142
- phrase_indices=phrase_indices,
1143
- guidance_attn_keys=guidance_attn_keys,
1144
- verbose=verbose,
1145
- **kwargs,
1146
- )
1147
- * loss_scale
1148
- )
1149
-
1150
- if torch.isnan(loss):
1151
- raise RuntimeError("**Loss is NaN**")
1152
-
1153
- # This callback allows visualizations.
1154
- if guidance_callback is not None:
1155
- guidance_callback(self, latents, loss, iteration, index)
1156
-
1157
- self._saved_attn = None
1158
-
1159
- grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents])[0]
1160
-
1161
- latents.requires_grad_(False)
1162
-
1163
- # Scaling with classifier guidance
1164
- alpha_prod_t = scheduler.alphas_cumprod[t]
1165
- # Classifier guidance: https://arxiv.org/pdf/2105.05233.pdf
1166
- # DDIM: https://arxiv.org/pdf/2010.02502.pdf
1167
- scale = (1 - alpha_prod_t) ** (0.5)
1168
- latents = latents - scale * grad_cond
1169
-
1170
- iteration += 1
1171
-
1172
- if clear_cache:
1173
- gc.collect()
1174
- torch.cuda.empty_cache()
1175
-
1176
- if verbose:
1177
- logger.info(
1178
- f"time index {index}, loss: {loss.item()/loss_scale:.3f}, loss threshold: {loss_threshold:.3f}, iteration: {iteration}"
1179
- )
1180
-
1181
- finally:
1182
- self.enable_attn_hook(enabled=False)
1183
-
1184
- return latents, loss
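The latent refinement above is a classifier-guidance-style update; as a sketch, writing \bar{\alpha}_t for the scheduler's `alphas_cumprod[t]`, \lambda for `loss_scale`, and \mathcal{L}_{\mathrm{attn}} for the cross-attention loss from `compute_ca_loss`:

z_t \leftarrow z_t - \sqrt{1 - \bar{\alpha}_t}\; \nabla_{z_t} \big(\lambda\, \mathcal{L}_{\mathrm{attn}}(z_t)\big)

The refinement runs only for the first `guidance_timesteps` denoising steps and stops early once the de-scaled loss drops below `loss_threshold` or the per-step iteration budget `max_iter` is exhausted.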
1185
-
1186
- # Below are methods copied from StableDiffusionPipeline
1187
- # The design choice of not inheriting from StableDiffusionPipeline is discussed here: https://github.com/huggingface/diffusers/pull/5993#issuecomment-1834258517
1188
-
1189
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
1190
- def _encode_prompt(
1191
- self,
1192
- prompt,
1193
- device,
1194
- num_images_per_prompt,
1195
- do_classifier_free_guidance,
1196
- negative_prompt=None,
1197
- prompt_embeds: Optional[torch.Tensor] = None,
1198
- negative_prompt_embeds: Optional[torch.Tensor] = None,
1199
- lora_scale: Optional[float] = None,
1200
- **kwargs,
1201
- ):
1202
- deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
1203
- deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
1204
-
1205
- prompt_embeds_tuple = self.encode_prompt(
1206
- prompt=prompt,
1207
- device=device,
1208
- num_images_per_prompt=num_images_per_prompt,
1209
- do_classifier_free_guidance=do_classifier_free_guidance,
1210
- negative_prompt=negative_prompt,
1211
- prompt_embeds=prompt_embeds,
1212
- negative_prompt_embeds=negative_prompt_embeds,
1213
- lora_scale=lora_scale,
1214
- **kwargs,
1215
- )
1216
-
1217
- # concatenate for backwards comp
1218
- prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
1219
-
1220
- return prompt_embeds
1221
-
1222
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
1223
- def encode_prompt(
1224
- self,
1225
- prompt,
1226
- device,
1227
- num_images_per_prompt,
1228
- do_classifier_free_guidance,
1229
- negative_prompt=None,
1230
- prompt_embeds: Optional[torch.Tensor] = None,
1231
- negative_prompt_embeds: Optional[torch.Tensor] = None,
1232
- lora_scale: Optional[float] = None,
1233
- clip_skip: Optional[int] = None,
1234
- ):
1235
- r"""
1236
- Encodes the prompt into text encoder hidden states.
1237
-
1238
- Args:
1239
- prompt (`str` or `List[str]`, *optional*):
1240
- prompt to be encoded
1241
- device: (`torch.device`):
1242
- torch device
1243
- num_images_per_prompt (`int`):
1244
- number of images that should be generated per prompt
1245
- do_classifier_free_guidance (`bool`):
1246
- whether to use classifier free guidance or not
1247
- negative_prompt (`str` or `List[str]`, *optional*):
1248
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
1249
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1250
- less than `1`).
1251
- prompt_embeds (`torch.Tensor`, *optional*):
1252
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1253
- provided, text embeddings will be generated from `prompt` input argument.
1254
- negative_prompt_embeds (`torch.Tensor`, *optional*):
1255
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1256
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1257
- argument.
1258
- lora_scale (`float`, *optional*):
1259
- A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
1260
- clip_skip (`int`, *optional*):
1261
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1262
- the output of the pre-final layer will be used for computing the prompt embeddings.
1263
- """
1264
- # set lora scale so that monkey patched LoRA
1265
- # function of text encoder can correctly access it
1266
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
1267
- self._lora_scale = lora_scale
1268
-
1269
- # dynamically adjust the LoRA scale
1270
- if not USE_PEFT_BACKEND:
1271
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
1272
- else:
1273
- scale_lora_layers(self.text_encoder, lora_scale)
1274
-
1275
- if prompt is not None and isinstance(prompt, str):
1276
- batch_size = 1
1277
- elif prompt is not None and isinstance(prompt, list):
1278
- batch_size = len(prompt)
1279
- else:
1280
- batch_size = prompt_embeds.shape[0]
1281
-
1282
- if prompt_embeds is None:
1283
- # textual inversion: process multi-vector tokens if necessary
1284
- if isinstance(self, TextualInversionLoaderMixin):
1285
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
1286
-
1287
- text_inputs = self.tokenizer(
1288
- prompt,
1289
- padding="max_length",
1290
- max_length=self.tokenizer.model_max_length,
1291
- truncation=True,
1292
- return_tensors="pt",
1293
- )
1294
- text_input_ids = text_inputs.input_ids
1295
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
1296
-
1297
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
1298
- text_input_ids, untruncated_ids
1299
- ):
1300
- removed_text = self.tokenizer.batch_decode(
1301
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
1302
- )
1303
- logger.warning(
1304
- "The following part of your input was truncated because CLIP can only handle sequences up to"
1305
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
1306
- )
1307
-
1308
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
1309
- attention_mask = text_inputs.attention_mask.to(device)
1310
- else:
1311
- attention_mask = None
1312
-
1313
- if clip_skip is None:
1314
- prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
1315
- prompt_embeds = prompt_embeds[0]
1316
- else:
1317
- prompt_embeds = self.text_encoder(
1318
- text_input_ids.to(device),
1319
- attention_mask=attention_mask,
1320
- output_hidden_states=True,
1321
- )
1322
- # Access the `hidden_states` first, that contains a tuple of
1323
- # all the hidden states from the encoder layers. Then index into
1324
- # the tuple to access the hidden states from the desired layer.
1325
- prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
1326
- # We also need to apply the final LayerNorm here to not mess with the
1327
- # representations. The `last_hidden_states` that we typically use for
1328
- # obtaining the final prompt representations passes through the LayerNorm
1329
- # layer.
1330
- prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
1331
-
1332
- if self.text_encoder is not None:
1333
- prompt_embeds_dtype = self.text_encoder.dtype
1334
- elif self.unet is not None:
1335
- prompt_embeds_dtype = self.unet.dtype
1336
- else:
1337
- prompt_embeds_dtype = prompt_embeds.dtype
1338
-
1339
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
1340
-
1341
- bs_embed, seq_len, _ = prompt_embeds.shape
1342
- # duplicate text embeddings for each generation per prompt, using mps friendly method
1343
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
1344
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
1345
-
1346
- # get unconditional embeddings for classifier free guidance
1347
- if do_classifier_free_guidance and negative_prompt_embeds is None:
1348
- uncond_tokens: List[str]
1349
- if negative_prompt is None:
1350
- uncond_tokens = [""] * batch_size
1351
- elif prompt is not None and type(prompt) is not type(negative_prompt):
1352
- raise TypeError(
1353
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
1354
- f" {type(prompt)}."
1355
- )
1356
- elif isinstance(negative_prompt, str):
1357
- uncond_tokens = [negative_prompt]
1358
- elif batch_size != len(negative_prompt):
1359
- raise ValueError(
1360
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
1361
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
1362
- " the batch size of `prompt`."
1363
- )
1364
- else:
1365
- uncond_tokens = negative_prompt
1366
-
1367
- # textual inversion: process multi-vector tokens if necessary
1368
- if isinstance(self, TextualInversionLoaderMixin):
1369
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
1370
-
1371
- max_length = prompt_embeds.shape[1]
1372
- uncond_input = self.tokenizer(
1373
- uncond_tokens,
1374
- padding="max_length",
1375
- max_length=max_length,
1376
- truncation=True,
1377
- return_tensors="pt",
1378
- )
1379
-
1380
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
1381
- attention_mask = uncond_input.attention_mask.to(device)
1382
- else:
1383
- attention_mask = None
1384
-
1385
- negative_prompt_embeds = self.text_encoder(
1386
- uncond_input.input_ids.to(device),
1387
- attention_mask=attention_mask,
1388
- )
1389
- negative_prompt_embeds = negative_prompt_embeds[0]
1390
-
1391
- if do_classifier_free_guidance:
1392
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
1393
- seq_len = negative_prompt_embeds.shape[1]
1394
-
1395
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
1396
-
1397
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
1398
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
1399
-
1400
- if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
1401
- # Retrieve the original scale by scaling back the LoRA layers
1402
- unscale_lora_layers(self.text_encoder, lora_scale)
1403
-
1404
- return prompt_embeds, negative_prompt_embeds
1405
-
1406
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
1407
- def encode_image(self, image, device, num_images_per_prompt):
1408
- dtype = next(self.image_encoder.parameters()).dtype
1409
-
1410
- if not isinstance(image, torch.Tensor):
1411
- image = self.feature_extractor(image, return_tensors="pt").pixel_values
1412
-
1413
- image = image.to(device=device, dtype=dtype)
1414
- image_embeds = self.image_encoder(image).image_embeds
1415
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
1416
-
1417
- uncond_image_embeds = torch.zeros_like(image_embeds)
1418
- return image_embeds, uncond_image_embeds
1419
-
1420
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
1421
- def run_safety_checker(self, image, device, dtype):
1422
- if self.safety_checker is None:
1423
- has_nsfw_concept = None
1424
- else:
1425
- if torch.is_tensor(image):
1426
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
1427
- else:
1428
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
1429
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
1430
- image, has_nsfw_concept = self.safety_checker(
1431
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
1432
- )
1433
- return image, has_nsfw_concept
1434
-
1435
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
1436
- def decode_latents(self, latents):
1437
- deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
1438
- deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
1439
-
1440
- latents = 1 / self.vae.config.scaling_factor * latents
1441
- image = self.vae.decode(latents, return_dict=False)[0]
1442
- image = (image / 2 + 0.5).clamp(0, 1)
1443
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
1444
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
1445
- return image
1446
-
1447
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
1448
- def prepare_extra_step_kwargs(self, generator, eta):
1449
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
1450
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
1451
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
1452
- # and should be between [0, 1]
1453
-
1454
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
1455
- extra_step_kwargs = {}
1456
- if accepts_eta:
1457
- extra_step_kwargs["eta"] = eta
1458
-
1459
- # check if the scheduler accepts generator
1460
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
1461
- if accepts_generator:
1462
- extra_step_kwargs["generator"] = generator
1463
- return extra_step_kwargs
1464
-
1465
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
1466
- def prepare_latents(
1467
- self,
1468
- batch_size,
1469
- num_channels_latents,
1470
- height,
1471
- width,
1472
- dtype,
1473
- device,
1474
- generator,
1475
- latents=None,
1476
- ):
1477
- shape = (
1478
- batch_size,
1479
- num_channels_latents,
1480
- height // self.vae_scale_factor,
1481
- width // self.vae_scale_factor,
1482
- )
1483
- if isinstance(generator, list) and len(generator) != batch_size:
1484
- raise ValueError(
1485
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
1486
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
1487
- )
1488
-
1489
- if latents is None:
1490
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
1491
- else:
1492
- latents = latents.to(device)
1493
-
1494
- # scale the initial noise by the standard deviation required by the scheduler
1495
- latents = latents * self.scheduler.init_noise_sigma
1496
- return latents
1497
-
1498
- # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
1499
- def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
1500
- """
1501
- See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
1502
-
1503
- Args:
1504
- w (`torch.Tensor`):
1505
- guidance scale values for which to generate embedding vectors
1506
- embedding_dim (`int`, *optional*, defaults to 512):
1507
- dimension of the embeddings to generate
1508
- dtype:
1509
- data type of the generated embeddings
1510
-
1511
- Returns:
1512
- `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`
1513
- """
1514
- assert len(w.shape) == 1
1515
- w = w * 1000.0
1516
-
1517
- half_dim = embedding_dim // 2
1518
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
1519
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
1520
- emb = w.to(dtype)[:, None] * emb[None, :]
1521
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
1522
- if embedding_dim % 2 == 1: # zero pad
1523
- emb = torch.nn.functional.pad(emb, (0, 1))
1524
- assert emb.shape == (w.shape[0], embedding_dim)
1525
- return emb
1526
-
1527
- @property
1528
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
1529
- def guidance_scale(self):
1530
- return self._guidance_scale
1531
-
1532
- @property
1533
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_rescale
1534
- def guidance_rescale(self):
1535
- return self._guidance_rescale
1536
-
1537
- @property
1538
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
1539
- def clip_skip(self):
1540
- return self._clip_skip
1541
-
1542
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1543
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1544
- # corresponds to doing no classifier free guidance.
1545
- @property
1546
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
1547
- def do_classifier_free_guidance(self):
1548
- return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
1549
-
1550
- @property
1551
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
1552
- def cross_attention_kwargs(self):
1553
- return self._cross_attention_kwargs
1554
-
1555
- @property
1556
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps
1557
- def num_timesteps(self):
1558
- return self._num_timesteps
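
For reference, the `get_guidance_scale_embedding` helper removed above computes a standard sinusoidal embedding of the guidance scale. Below is a minimal standalone sketch of the same computation, assuming only `torch`; the function name, the example guidance-scale values, and the embedding dimension are illustrative and not part of the original file.

import torch

def guidance_scale_embedding(w: torch.Tensor, embedding_dim: int = 512, dtype=torch.float32) -> torch.Tensor:
    # w: 1-D tensor of guidance scales; scaled by 1000 as in the pipeline code above
    assert w.ndim == 1
    w = w.to(dtype) * 1000.0
    half_dim = embedding_dim // 2
    # log-spaced frequencies, mirroring the deleted implementation
    freq = torch.exp(torch.arange(half_dim, dtype=dtype) * -(torch.log(torch.tensor(10000.0)) / (half_dim - 1)))
    emb = w[:, None] * freq[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero-pad odd embedding dimensions
        emb = torch.nn.functional.pad(emb, (0, 1))
    return emb  # shape: (len(w), embedding_dim)

# Example: embeddings for two guidance scales with a 256-dim embedding
print(guidance_scale_embedding(torch.tensor([1.5, 7.5]), embedding_dim=256).shape)  # torch.Size([2, 256])
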
lpw_stable_diffusion.py DELETED
@@ -1,1371 +0,0 @@
1
- import inspect
2
- import re
3
- from typing import Any, Callable, Dict, List, Optional, Union
4
-
5
- import numpy as np
6
- import PIL.Image
7
- import torch
8
- from packaging import version
9
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
10
-
11
- from diffusers import DiffusionPipeline
12
- from diffusers.configuration_utils import FrozenDict
13
- from diffusers.image_processor import VaeImageProcessor
14
- from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
15
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
16
- from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
17
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
18
- from diffusers.schedulers import KarrasDiffusionSchedulers
19
- from diffusers.utils import (
20
- PIL_INTERPOLATION,
21
- deprecate,
22
- logging,
23
- )
24
- from diffusers.utils.torch_utils import randn_tensor
25
-
26
-
27
- # ------------------------------------------------------------------------------
28
-
29
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
30
-
31
- re_attention = re.compile(
32
- r"""
33
- \\\(|
34
- \\\)|
35
- \\\[|
36
- \\]|
37
- \\\\|
38
- \\|
39
- \(|
40
- \[|
41
- :([+-]?[.\d]+)\)|
42
- \)|
43
- ]|
44
- [^\\()\[\]:]+|
45
- :
46
- """,
47
- re.X,
48
- )
49
-
50
-
51
- def parse_prompt_attention(text):
52
- """
53
- Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
54
- Accepted tokens are:
55
- (abc) - increases attention to abc by a multiplier of 1.1
56
- (abc:3.12) - increases attention to abc by a multiplier of 3.12
57
- [abc] - decreases attention to abc by a multiplier of 1.1
58
- \\( - literal character '('
59
- \\[ - literal character '['
60
- \\) - literal character ')'
61
- \\] - literal character ']'
62
- \\ - literal character '\'
63
- anything else - just text
64
- >>> parse_prompt_attention('normal text')
65
- [['normal text', 1.0]]
66
- >>> parse_prompt_attention('an (important) word')
67
- [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
68
- >>> parse_prompt_attention('(unbalanced')
69
- [['unbalanced', 1.1]]
70
- >>> parse_prompt_attention('\\(literal\\]')
71
- [['(literal]', 1.0]]
72
- >>> parse_prompt_attention('(unnecessary)(parens)')
73
- [['unnecessaryparens', 1.1]]
74
- >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
75
- [['a ', 1.0],
76
- ['house', 1.5730000000000004],
77
- [' ', 1.1],
78
- ['on', 1.0],
79
- [' a ', 1.1],
80
- ['hill', 0.55],
81
- [', sun, ', 1.1],
82
- ['sky', 1.4641000000000006],
83
- ['.', 1.1]]
84
- """
85
-
86
- res = []
87
- round_brackets = []
88
- square_brackets = []
89
-
90
- round_bracket_multiplier = 1.1
91
- square_bracket_multiplier = 1 / 1.1
92
-
93
- def multiply_range(start_position, multiplier):
94
- for p in range(start_position, len(res)):
95
- res[p][1] *= multiplier
96
-
97
- for m in re_attention.finditer(text):
98
- text = m.group(0)
99
- weight = m.group(1)
100
-
101
- if text.startswith("\\"):
102
- res.append([text[1:], 1.0])
103
- elif text == "(":
104
- round_brackets.append(len(res))
105
- elif text == "[":
106
- square_brackets.append(len(res))
107
- elif weight is not None and len(round_brackets) > 0:
108
- multiply_range(round_brackets.pop(), float(weight))
109
- elif text == ")" and len(round_brackets) > 0:
110
- multiply_range(round_brackets.pop(), round_bracket_multiplier)
111
- elif text == "]" and len(square_brackets) > 0:
112
- multiply_range(square_brackets.pop(), square_bracket_multiplier)
113
- else:
114
- res.append([text, 1.0])
115
-
116
- for pos in round_brackets:
117
- multiply_range(pos, round_bracket_multiplier)
118
-
119
- for pos in square_brackets:
120
- multiply_range(pos, square_bracket_multiplier)
121
-
122
- if len(res) == 0:
123
- res = [["", 1.0]]
124
-
125
- # merge runs of identical weights
126
- i = 0
127
- while i + 1 < len(res):
128
- if res[i][1] == res[i + 1][1]:
129
- res[i][0] += res[i + 1][0]
130
- res.pop(i + 1)
131
- else:
132
- i += 1
133
-
134
- return res
135
-
136
-
137
- def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
138
- r"""
139
- Tokenize a list of prompts and return its tokens with weights of each token.
140
-
141
- No padding, starting or ending token is included.
142
- """
143
- tokens = []
144
- weights = []
145
- truncated = False
146
- for text in prompt:
147
- texts_and_weights = parse_prompt_attention(text)
148
- text_token = []
149
- text_weight = []
150
- for word, weight in texts_and_weights:
151
- # tokenize and discard the starting and the ending token
152
- token = pipe.tokenizer(word).input_ids[1:-1]
153
- text_token += token
154
- # copy the weight by length of token
155
- text_weight += [weight] * len(token)
156
- # stop if the text is too long (longer than truncation limit)
157
- if len(text_token) > max_length:
158
- truncated = True
159
- break
160
- # truncate
161
- if len(text_token) > max_length:
162
- truncated = True
163
- text_token = text_token[:max_length]
164
- text_weight = text_weight[:max_length]
165
- tokens.append(text_token)
166
- weights.append(text_weight)
167
- if truncated:
168
- logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
169
- return tokens, weights
170
-
171
-
172
- def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
173
- r"""
174
- Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
175
- """
176
- max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
177
- weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
178
- for i in range(len(tokens)):
179
- tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
180
- if no_boseos_middle:
181
- weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
182
- else:
183
- w = []
184
- if len(weights[i]) == 0:
185
- w = [1.0] * weights_length
186
- else:
187
- for j in range(max_embeddings_multiples):
188
- w.append(1.0) # weight for starting token in this chunk
189
- w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
190
- w.append(1.0) # weight for ending token in this chunk
191
- w += [1.0] * (weights_length - len(w))
192
- weights[i] = w[:]
193
-
194
- return tokens, weights
195
-
196
-
197
- def get_unweighted_text_embeddings(
198
- pipe: DiffusionPipeline,
199
- text_input: torch.Tensor,
200
- chunk_length: int,
201
- no_boseos_middle: Optional[bool] = True,
202
- ):
203
- """
204
- When the length of tokens is a multiple of the capacity of the text encoder,
205
- the input should be split into chunks, and each chunk sent to the text encoder individually.
206
- """
207
- max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
208
- if max_embeddings_multiples > 1:
209
- text_embeddings = []
210
- for i in range(max_embeddings_multiples):
211
- # extract the i-th chunk
212
- text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
213
-
214
- # cover the head and the tail by the starting and the ending tokens
215
- text_input_chunk[:, 0] = text_input[0, 0]
216
- text_input_chunk[:, -1] = text_input[0, -1]
217
- text_embedding = pipe.text_encoder(text_input_chunk)[0]
218
-
219
- if no_boseos_middle:
220
- if i == 0:
221
- # discard the ending token
222
- text_embedding = text_embedding[:, :-1]
223
- elif i == max_embeddings_multiples - 1:
224
- # discard the starting token
225
- text_embedding = text_embedding[:, 1:]
226
- else:
227
- # discard both starting and ending tokens
228
- text_embedding = text_embedding[:, 1:-1]
229
-
230
- text_embeddings.append(text_embedding)
231
- text_embeddings = torch.concat(text_embeddings, axis=1)
232
- else:
233
- text_embeddings = pipe.text_encoder(text_input)[0]
234
- return text_embeddings
235
-
236
-
237
- def get_weighted_text_embeddings(
238
- pipe: DiffusionPipeline,
239
- prompt: Union[str, List[str]],
240
- uncond_prompt: Optional[Union[str, List[str]]] = None,
241
- max_embeddings_multiples: Optional[int] = 3,
242
- no_boseos_middle: Optional[bool] = False,
243
- skip_parsing: Optional[bool] = False,
244
- skip_weighting: Optional[bool] = False,
245
- ):
246
- r"""
247
- Prompts can be assigned with local weights using brackets. For example,
248
- prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
249
- and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
250
-
251
- Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
252
-
253
- Args:
254
- pipe (`DiffusionPipeline`):
255
- Pipe to provide access to the tokenizer and the text encoder.
256
- prompt (`str` or `List[str]`):
257
- The prompt or prompts to guide the image generation.
258
- uncond_prompt (`str` or `List[str]`):
259
- The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
260
- is provided, the embeddings of prompt and uncond_prompt are concatenated.
261
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
262
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
263
- no_boseos_middle (`bool`, *optional*, defaults to `False`):
264
- If the length of the text tokens is a multiple of the text encoder capacity, whether to keep the starting and
265
- ending tokens in each of the middle chunks.
266
- skip_parsing (`bool`, *optional*, defaults to `False`):
267
- Skip the parsing of brackets.
268
- skip_weighting (`bool`, *optional*, defaults to `False`):
269
- Skip the weighting. When the parsing is skipped, it is forced True.
270
- """
271
- max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
272
- if isinstance(prompt, str):
273
- prompt = [prompt]
274
-
275
- if not skip_parsing:
276
- prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
277
- if uncond_prompt is not None:
278
- if isinstance(uncond_prompt, str):
279
- uncond_prompt = [uncond_prompt]
280
- uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
281
- else:
282
- prompt_tokens = [
283
- token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
284
- ]
285
- prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
286
- if uncond_prompt is not None:
287
- if isinstance(uncond_prompt, str):
288
- uncond_prompt = [uncond_prompt]
289
- uncond_tokens = [
290
- token[1:-1]
291
- for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
292
- ]
293
- uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
294
-
295
- # round up the longest length of tokens to a multiple of (model_max_length - 2)
296
- max_length = max([len(token) for token in prompt_tokens])
297
- if uncond_prompt is not None:
298
- max_length = max(max_length, max([len(token) for token in uncond_tokens]))
299
-
300
- max_embeddings_multiples = min(
301
- max_embeddings_multiples,
302
- (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
303
- )
304
- max_embeddings_multiples = max(1, max_embeddings_multiples)
305
- max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
306
-
307
- # pad the length of tokens and weights
308
- bos = pipe.tokenizer.bos_token_id
309
- eos = pipe.tokenizer.eos_token_id
310
- pad = getattr(pipe.tokenizer, "pad_token_id", eos)
311
- prompt_tokens, prompt_weights = pad_tokens_and_weights(
312
- prompt_tokens,
313
- prompt_weights,
314
- max_length,
315
- bos,
316
- eos,
317
- pad,
318
- no_boseos_middle=no_boseos_middle,
319
- chunk_length=pipe.tokenizer.model_max_length,
320
- )
321
- prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
322
- if uncond_prompt is not None:
323
- uncond_tokens, uncond_weights = pad_tokens_and_weights(
324
- uncond_tokens,
325
- uncond_weights,
326
- max_length,
327
- bos,
328
- eos,
329
- pad,
330
- no_boseos_middle=no_boseos_middle,
331
- chunk_length=pipe.tokenizer.model_max_length,
332
- )
333
- uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
334
-
335
- # get the embeddings
336
- text_embeddings = get_unweighted_text_embeddings(
337
- pipe,
338
- prompt_tokens,
339
- pipe.tokenizer.model_max_length,
340
- no_boseos_middle=no_boseos_middle,
341
- )
342
- prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
343
- if uncond_prompt is not None:
344
- uncond_embeddings = get_unweighted_text_embeddings(
345
- pipe,
346
- uncond_tokens,
347
- pipe.tokenizer.model_max_length,
348
- no_boseos_middle=no_boseos_middle,
349
- )
350
- uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
351
-
352
- # assign weights to the prompts and normalize in the sense of mean
353
- # TODO: should we normalize by chunk or in a whole (current implementation)?
354
- if (not skip_parsing) and (not skip_weighting):
355
- previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
356
- text_embeddings *= prompt_weights.unsqueeze(-1)
357
- current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
358
- text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
359
- if uncond_prompt is not None:
360
- previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
361
- uncond_embeddings *= uncond_weights.unsqueeze(-1)
362
- current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
363
- uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
364
-
365
- if uncond_prompt is not None:
366
- return text_embeddings, uncond_embeddings
367
- return text_embeddings, None
368
-
369
-
370
- def preprocess_image(image, batch_size):
371
- w, h = image.size
372
- w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
373
- image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
374
- image = np.array(image).astype(np.float32) / 255.0
375
- image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
376
- image = torch.from_numpy(image)
377
- return 2.0 * image - 1.0
378
-
379
-
380
- def preprocess_mask(mask, batch_size, scale_factor=8):
381
- if not isinstance(mask, torch.Tensor):
382
- mask = mask.convert("L")
383
- w, h = mask.size
384
- w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
385
- mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
386
- mask = np.array(mask).astype(np.float32) / 255.0
387
- mask = np.tile(mask, (4, 1, 1))
388
- mask = np.vstack([mask[None]] * batch_size)
389
- mask = 1 - mask # repaint white, keep black
390
- mask = torch.from_numpy(mask)
391
- return mask
392
-
393
- else:
394
- valid_mask_channel_sizes = [1, 3]
395
- # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W)
396
- if mask.shape[3] in valid_mask_channel_sizes:
397
- mask = mask.permute(0, 3, 1, 2)
398
- elif mask.shape[1] not in valid_mask_channel_sizes:
399
- raise ValueError(
400
- f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
401
- f" but received mask of shape {tuple(mask.shape)}"
402
- )
403
- # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
404
- mask = mask.mean(dim=1, keepdim=True)
405
- h, w = mask.shape[-2:]
406
- h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8
407
- mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
408
- return mask
409
-
410
-
411
- class StableDiffusionLongPromptWeightingPipeline(
412
- DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
413
- ):
414
- r"""
415
- Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for parsing
417
- weighting in the prompt.
417
-
418
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
419
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
420
-
421
- Args:
422
- vae ([`AutoencoderKL`]):
423
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
424
- text_encoder ([`CLIPTextModel`]):
425
- Frozen text-encoder. Stable Diffusion uses the text portion of
426
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
427
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
428
- tokenizer (`CLIPTokenizer`):
429
- Tokenizer of class
430
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
431
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
432
- scheduler ([`SchedulerMixin`]):
433
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
434
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
435
- safety_checker ([`StableDiffusionSafetyChecker`]):
436
- Classification module that estimates whether generated images could be considered offensive or harmful.
437
- Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
438
- feature_extractor ([`CLIPImageProcessor`]):
439
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
440
- """
441
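
# For context, a minimal usage sketch of this long-prompt-weighting pipeline loaded through the
# `custom_pipeline` mechanism. The checkpoint id, device, and prompt below are illustrative
# assumptions, not taken from this file; the `(word:weight)` syntax follows the
# parse_prompt_attention rules documented above.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",          # any SD 1.x checkpoint should work here
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

image = pipe.text2img(
    "a (sunlit:1.2) mountain lake, (masterpiece:1.3), highly detailed",
    negative_prompt="blurry, low quality",
    max_embeddings_multiples=3,                # allow prompts up to 3x the CLIP token limit
    num_inference_steps=30,
).images[0]
image.save("lake.png")
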
-
442
- model_cpu_offload_seq = "text_encoder->unet->vae"
443
- _optional_components = ["safety_checker", "feature_extractor"]
444
- _exclude_from_cpu_offload = ["safety_checker"]
445
-
446
- def __init__(
447
- self,
448
- vae: AutoencoderKL,
449
- text_encoder: CLIPTextModel,
450
- tokenizer: CLIPTokenizer,
451
- unet: UNet2DConditionModel,
452
- scheduler: KarrasDiffusionSchedulers,
453
- safety_checker: StableDiffusionSafetyChecker,
454
- feature_extractor: CLIPImageProcessor,
455
- requires_safety_checker: bool = True,
456
- ):
457
- super().__init__()
458
-
459
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
460
- deprecation_message = (
461
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
462
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
463
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
464
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
465
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
466
- " file"
467
- )
468
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
469
- new_config = dict(scheduler.config)
470
- new_config["steps_offset"] = 1
471
- scheduler._internal_dict = FrozenDict(new_config)
472
-
473
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
474
- deprecation_message = (
475
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
476
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
477
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
478
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
479
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
480
- )
481
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
482
- new_config = dict(scheduler.config)
483
- new_config["clip_sample"] = False
484
- scheduler._internal_dict = FrozenDict(new_config)
485
-
486
- if safety_checker is None and requires_safety_checker:
487
- logger.warning(
488
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
489
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
490
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
491
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
492
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
493
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
494
- )
495
-
496
- if safety_checker is not None and feature_extractor is None:
497
- raise ValueError(
498
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
499
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
500
- )
501
-
502
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
503
- version.parse(unet.config._diffusers_version).base_version
504
- ) < version.parse("0.9.0.dev0")
505
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
506
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
507
- deprecation_message = (
508
- "The configuration file of the unet has set the default `sample_size` to smaller than"
509
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
510
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
511
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
512
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
513
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
514
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
515
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
516
- " the `unet/config.json` file"
517
- )
518
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
519
- new_config = dict(unet.config)
520
- new_config["sample_size"] = 64
521
- unet._internal_dict = FrozenDict(new_config)
522
- self.register_modules(
523
- vae=vae,
524
- text_encoder=text_encoder,
525
- tokenizer=tokenizer,
526
- unet=unet,
527
- scheduler=scheduler,
528
- safety_checker=safety_checker,
529
- feature_extractor=feature_extractor,
530
- )
531
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
532
-
533
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
534
- self.register_to_config(
535
- requires_safety_checker=requires_safety_checker,
536
- )
537
-
538
- def _encode_prompt(
539
- self,
540
- prompt,
541
- device,
542
- num_images_per_prompt,
543
- do_classifier_free_guidance,
544
- negative_prompt=None,
545
- max_embeddings_multiples=3,
546
- prompt_embeds: Optional[torch.Tensor] = None,
547
- negative_prompt_embeds: Optional[torch.Tensor] = None,
548
- ):
549
- r"""
550
- Encodes the prompt into text encoder hidden states.
551
-
552
- Args:
553
- prompt (`str` or `list(int)`):
554
- prompt to be encoded
555
- device: (`torch.device`):
556
- torch device
557
- num_images_per_prompt (`int`):
558
- number of images that should be generated per prompt
559
- do_classifier_free_guidance (`bool`):
560
- whether to use classifier free guidance or not
561
- negative_prompt (`str` or `List[str]`):
562
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
563
- if `guidance_scale` is less than `1`).
564
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
565
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
566
- """
567
- if prompt is not None and isinstance(prompt, str):
568
- batch_size = 1
569
- elif prompt is not None and isinstance(prompt, list):
570
- batch_size = len(prompt)
571
- else:
572
- batch_size = prompt_embeds.shape[0]
573
-
574
- if negative_prompt_embeds is None:
575
- if negative_prompt is None:
576
- negative_prompt = [""] * batch_size
577
- elif isinstance(negative_prompt, str):
578
- negative_prompt = [negative_prompt] * batch_size
579
- if batch_size != len(negative_prompt):
580
- raise ValueError(
581
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
582
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
583
- " the batch size of `prompt`."
584
- )
585
- if prompt_embeds is None or negative_prompt_embeds is None:
586
- if isinstance(self, TextualInversionLoaderMixin):
587
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
588
- if do_classifier_free_guidance and negative_prompt_embeds is None:
589
- negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)
590
-
591
- prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
592
- pipe=self,
593
- prompt=prompt,
594
- uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
595
- max_embeddings_multiples=max_embeddings_multiples,
596
- )
597
- if prompt_embeds is None:
598
- prompt_embeds = prompt_embeds1
599
- if negative_prompt_embeds is None:
600
- negative_prompt_embeds = negative_prompt_embeds1
601
-
602
- bs_embed, seq_len, _ = prompt_embeds.shape
603
- # duplicate text embeddings for each generation per prompt, using mps friendly method
604
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
605
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
606
-
607
- if do_classifier_free_guidance:
608
- bs_embed, seq_len, _ = negative_prompt_embeds.shape
609
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
610
- negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
611
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
612
-
613
- return prompt_embeds
614
-
615
- def check_inputs(
616
- self,
617
- prompt,
618
- height,
619
- width,
620
- strength,
621
- callback_steps,
622
- negative_prompt=None,
623
- prompt_embeds=None,
624
- negative_prompt_embeds=None,
625
- ):
626
- if height % 8 != 0 or width % 8 != 0:
627
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
628
-
629
- if strength < 0 or strength > 1:
630
- raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
631
-
632
- if (callback_steps is None) or (
633
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
634
- ):
635
- raise ValueError(
636
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
637
- f" {type(callback_steps)}."
638
- )
639
-
640
- if prompt is not None and prompt_embeds is not None:
641
- raise ValueError(
642
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
643
- " only forward one of the two."
644
- )
645
- elif prompt is None and prompt_embeds is None:
646
- raise ValueError(
647
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
648
- )
649
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
650
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
651
-
652
- if negative_prompt is not None and negative_prompt_embeds is not None:
653
- raise ValueError(
654
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
655
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
656
- )
657
-
658
- if prompt_embeds is not None and negative_prompt_embeds is not None:
659
- if prompt_embeds.shape != negative_prompt_embeds.shape:
660
- raise ValueError(
661
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
662
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
663
- f" {negative_prompt_embeds.shape}."
664
- )
665
-
666
- def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
667
- if is_text2img:
668
- return self.scheduler.timesteps.to(device), num_inference_steps
669
- else:
670
- # get the original timestep using init_timestep
671
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
672
-
673
- t_start = max(num_inference_steps - init_timestep, 0)
674
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
675
-
676
- return timesteps, num_inference_steps - t_start
677
-
678
- def run_safety_checker(self, image, device, dtype):
679
- if self.safety_checker is not None:
680
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
681
- image, has_nsfw_concept = self.safety_checker(
682
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
683
- )
684
- else:
685
- has_nsfw_concept = None
686
- return image, has_nsfw_concept
687
-
688
- def decode_latents(self, latents):
689
- latents = 1 / self.vae.config.scaling_factor * latents
690
- image = self.vae.decode(latents).sample
691
- image = (image / 2 + 0.5).clamp(0, 1)
692
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
693
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
694
- return image
695
-
696
- def prepare_extra_step_kwargs(self, generator, eta):
697
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
698
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
699
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
700
- # and should be between [0, 1]
701
-
702
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
703
- extra_step_kwargs = {}
704
- if accepts_eta:
705
- extra_step_kwargs["eta"] = eta
706
-
707
- # check if the scheduler accepts generator
708
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
709
- if accepts_generator:
710
- extra_step_kwargs["generator"] = generator
711
- return extra_step_kwargs
712
-
713
- def prepare_latents(
714
- self,
715
- image,
716
- timestep,
717
- num_images_per_prompt,
718
- batch_size,
719
- num_channels_latents,
720
- height,
721
- width,
722
- dtype,
723
- device,
724
- generator,
725
- latents=None,
726
- ):
727
- if image is None:
728
- batch_size = batch_size * num_images_per_prompt
729
- shape = (
730
- batch_size,
731
- num_channels_latents,
732
- int(height) // self.vae_scale_factor,
733
- int(width) // self.vae_scale_factor,
734
- )
735
- if isinstance(generator, list) and len(generator) != batch_size:
736
- raise ValueError(
737
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
738
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
739
- )
740
-
741
- if latents is None:
742
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
743
- else:
744
- latents = latents.to(device)
745
-
746
- # scale the initial noise by the standard deviation required by the scheduler
747
- latents = latents * self.scheduler.init_noise_sigma
748
- return latents, None, None
749
- else:
750
- image = image.to(device=self.device, dtype=dtype)
751
- init_latent_dist = self.vae.encode(image).latent_dist
752
- init_latents = init_latent_dist.sample(generator=generator)
753
- init_latents = self.vae.config.scaling_factor * init_latents
754
-
755
- # Expand init_latents for batch_size and num_images_per_prompt
756
- init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
757
- init_latents_orig = init_latents
758
-
759
- # add noise to latents using the timesteps
760
- noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype)
761
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
762
- latents = init_latents
763
- return latents, init_latents_orig, noise
764
-
765
- @torch.no_grad()
766
- def __call__(
767
- self,
768
- prompt: Union[str, List[str]],
769
- negative_prompt: Optional[Union[str, List[str]]] = None,
770
- image: Union[torch.Tensor, PIL.Image.Image] = None,
771
- mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
772
- height: int = 512,
773
- width: int = 512,
774
- num_inference_steps: int = 50,
775
- guidance_scale: float = 7.5,
776
- strength: float = 0.8,
777
- num_images_per_prompt: Optional[int] = 1,
778
- add_predicted_noise: Optional[bool] = False,
779
- eta: float = 0.0,
780
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
781
- latents: Optional[torch.Tensor] = None,
782
- prompt_embeds: Optional[torch.Tensor] = None,
783
- negative_prompt_embeds: Optional[torch.Tensor] = None,
784
- max_embeddings_multiples: Optional[int] = 3,
785
- output_type: Optional[str] = "pil",
786
- return_dict: bool = True,
787
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
788
- is_cancelled_callback: Optional[Callable[[], bool]] = None,
789
- callback_steps: int = 1,
790
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
791
- ):
792
- r"""
793
- Function invoked when calling the pipeline for generation.
794
-
795
- Args:
796
- prompt (`str` or `List[str]`):
797
- The prompt or prompts to guide the image generation.
798
- negative_prompt (`str` or `List[str]`, *optional*):
799
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
800
- if `guidance_scale` is less than `1`).
801
- image (`torch.Tensor` or `PIL.Image.Image`):
802
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
803
- process.
804
- mask_image (`torch.Tensor` or `PIL.Image.Image`):
805
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
806
- replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
807
- PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
808
- contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
809
- height (`int`, *optional*, defaults to 512):
810
- The height in pixels of the generated image.
811
- width (`int`, *optional*, defaults to 512):
812
- The width in pixels of the generated image.
813
- num_inference_steps (`int`, *optional*, defaults to 50):
814
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
815
- expense of slower inference.
816
- guidance_scale (`float`, *optional*, defaults to 7.5):
817
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
818
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
819
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
820
- 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
821
- usually at the expense of lower image quality.
822
- strength (`float`, *optional*, defaults to 0.8):
823
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
824
- `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
825
- number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
826
- noise will be maximum and the denoising process will run for the full number of iterations specified in
827
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
828
- num_images_per_prompt (`int`, *optional*, defaults to 1):
829
- The number of images to generate per prompt.
830
- add_predicted_noise (`bool`, *optional*, defaults to `False`):
831
- Use predicted noise instead of random noise when constructing noisy versions of the original image in
832
- the reverse diffusion process
833
- eta (`float`, *optional*, defaults to 0.0):
834
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
835
- [`schedulers.DDIMScheduler`], will be ignored for others.
836
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
837
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
838
- to make generation deterministic.
839
- latents (`torch.Tensor`, *optional*):
840
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
841
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
842
- tensor will be generated by sampling using the supplied random `generator`.
843
- prompt_embeds (`torch.Tensor`, *optional*):
844
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
845
- provided, text embeddings will be generated from `prompt` input argument.
846
- negative_prompt_embeds (`torch.Tensor`, *optional*):
847
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
848
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
849
- argument.
850
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
851
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
852
- output_type (`str`, *optional*, defaults to `"pil"`):
853
- The output format of the generated image. Choose between
854
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
855
- return_dict (`bool`, *optional*, defaults to `True`):
856
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
857
- plain tuple.
858
- callback (`Callable`, *optional*):
859
- A function that will be called every `callback_steps` steps during inference. The function will be
860
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
861
- is_cancelled_callback (`Callable`, *optional*):
862
- A function that will be called every `callback_steps` steps during inference. If the function returns
863
- `True`, the inference will be cancelled.
864
- callback_steps (`int`, *optional*, defaults to 1):
865
- The frequency at which the `callback` function will be called. If not specified, the callback will be
866
- called at every step.
867
- cross_attention_kwargs (`dict`, *optional*):
868
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
869
- `self.processor` in
870
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
871
-
872
- Returns:
873
- `None` if cancelled by `is_cancelled_callback`,
874
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
875
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
876
- When returning a tuple, the first element is a list with the generated images, and the second element is a
877
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
878
- (nsfw) content, according to the `safety_checker`.
879
- """
880
- # 0. Default height and width to unet
881
- height = height or self.unet.config.sample_size * self.vae_scale_factor
882
- width = width or self.unet.config.sample_size * self.vae_scale_factor
883
-
884
- # 1. Check inputs. Raise error if not correct
885
- self.check_inputs(
886
- prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
887
- )
888
-
889
- # 2. Define call parameters
890
- if prompt is not None and isinstance(prompt, str):
891
- batch_size = 1
892
- elif prompt is not None and isinstance(prompt, list):
893
- batch_size = len(prompt)
894
- else:
895
- batch_size = prompt_embeds.shape[0]
896
-
897
- device = self._execution_device
898
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
899
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
900
- # corresponds to doing no classifier free guidance.
901
- do_classifier_free_guidance = guidance_scale > 1.0
902
-
903
- # 3. Encode input prompt
904
- prompt_embeds = self._encode_prompt(
905
- prompt,
906
- device,
907
- num_images_per_prompt,
908
- do_classifier_free_guidance,
909
- negative_prompt,
910
- max_embeddings_multiples,
911
- prompt_embeds=prompt_embeds,
912
- negative_prompt_embeds=negative_prompt_embeds,
913
- )
914
- dtype = prompt_embeds.dtype
915
-
916
- # 4. Preprocess image and mask
917
- if isinstance(image, PIL.Image.Image):
918
- image = preprocess_image(image, batch_size)
919
- if image is not None:
920
- image = image.to(device=self.device, dtype=dtype)
921
- if isinstance(mask_image, PIL.Image.Image):
922
- mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor)
923
- if mask_image is not None:
924
- mask = mask_image.to(device=self.device, dtype=dtype)
925
- mask = torch.cat([mask] * num_images_per_prompt)
926
- else:
927
- mask = None
928
-
929
- # 5. set timesteps
930
- self.scheduler.set_timesteps(num_inference_steps, device=device)
931
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
932
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
933
-
934
- # 6. Prepare latent variables
935
- latents, init_latents_orig, noise = self.prepare_latents(
936
- image,
937
- latent_timestep,
938
- num_images_per_prompt,
939
- batch_size,
940
- self.unet.config.in_channels,
941
- height,
942
- width,
943
- dtype,
944
- device,
945
- generator,
946
- latents,
947
- )
948
-
949
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
950
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
951
-
952
- # 8. Denoising loop
953
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
954
- with self.progress_bar(total=num_inference_steps) as progress_bar:
955
- for i, t in enumerate(timesteps):
956
- # expand the latents if we are doing classifier free guidance
957
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
958
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
959
-
960
- # predict the noise residual
961
- noise_pred = self.unet(
962
- latent_model_input,
963
- t,
964
- encoder_hidden_states=prompt_embeds,
965
- cross_attention_kwargs=cross_attention_kwargs,
966
- ).sample
967
-
968
- # perform guidance
969
- if do_classifier_free_guidance:
970
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
971
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
972
-
973
- # compute the previous noisy sample x_t -> x_t-1
974
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
975
-
976
- if mask is not None:
977
- # masking
978
- if add_predicted_noise:
979
- init_latents_proper = self.scheduler.add_noise(
980
- init_latents_orig, noise_pred_uncond, torch.tensor([t])
981
- )
982
- else:
983
- init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
984
- latents = (init_latents_proper * mask) + (latents * (1 - mask))
985
-
986
- # call the callback, if provided
987
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
988
- progress_bar.update()
989
- if i % callback_steps == 0:
990
- if callback is not None:
991
- step_idx = i // getattr(self.scheduler, "order", 1)
992
- callback(step_idx, t, latents)
993
- if is_cancelled_callback is not None and is_cancelled_callback():
994
- return None
995
-
996
- if output_type == "latent":
997
- image = latents
998
- has_nsfw_concept = None
999
- elif output_type == "pil":
1000
- # 9. Post-processing
1001
- image = self.decode_latents(latents)
1002
-
1003
- # 10. Run safety checker
1004
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1005
-
1006
- # 11. Convert to PIL
1007
- image = self.numpy_to_pil(image)
1008
- else:
1009
- # 9. Post-processing
1010
- image = self.decode_latents(latents)
1011
-
1012
- # 10. Run safety checker
1013
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1014
-
1015
- # Offload last model to CPU
1016
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1017
- self.final_offload_hook.offload()
1018
-
1019
- if not return_dict:
1020
- return image, has_nsfw_concept
1021
-
1022
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
1023
-
1024
- def text2img(
1025
- self,
1026
- prompt: Union[str, List[str]],
1027
- negative_prompt: Optional[Union[str, List[str]]] = None,
1028
- height: int = 512,
1029
- width: int = 512,
1030
- num_inference_steps: int = 50,
1031
- guidance_scale: float = 7.5,
1032
- num_images_per_prompt: Optional[int] = 1,
1033
- eta: float = 0.0,
1034
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1035
- latents: Optional[torch.Tensor] = None,
1036
- prompt_embeds: Optional[torch.Tensor] = None,
1037
- negative_prompt_embeds: Optional[torch.Tensor] = None,
1038
- max_embeddings_multiples: Optional[int] = 3,
1039
- output_type: Optional[str] = "pil",
1040
- return_dict: bool = True,
1041
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
1042
- is_cancelled_callback: Optional[Callable[[], bool]] = None,
1043
- callback_steps: int = 1,
1044
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1045
- ):
1046
- r"""
1047
- Function for text-to-image generation.
1048
- Args:
1049
- prompt (`str` or `List[str]`):
1050
- The prompt or prompts to guide the image generation.
1051
- negative_prompt (`str` or `List[str]`, *optional*):
1052
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1053
- if `guidance_scale` is less than `1`).
1054
- height (`int`, *optional*, defaults to 512):
1055
- The height in pixels of the generated image.
1056
- width (`int`, *optional*, defaults to 512):
1057
- The width in pixels of the generated image.
1058
- num_inference_steps (`int`, *optional*, defaults to 50):
1059
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1060
- expense of slower inference.
1061
- guidance_scale (`float`, *optional*, defaults to 7.5):
1062
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1063
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
1064
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1065
- 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
1066
- usually at the expense of lower image quality.
1067
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1068
- The number of images to generate per prompt.
1069
- eta (`float`, *optional*, defaults to 0.0):
1070
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1071
- [`schedulers.DDIMScheduler`], will be ignored for others.
1072
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1073
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1074
- to make generation deterministic.
1075
- latents (`torch.Tensor`, *optional*):
1076
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1077
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1078
- tensor will be generated by sampling using the supplied random `generator`.
1079
- prompt_embeds (`torch.Tensor`, *optional*):
1080
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1081
- provided, text embeddings will be generated from `prompt` input argument.
1082
- negative_prompt_embeds (`torch.Tensor`, *optional*):
1083
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1084
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1085
- argument.
1086
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1087
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
1088
- output_type (`str`, *optional*, defaults to `"pil"`):
1089
- The output format of the generated image. Choose between
1090
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1091
- return_dict (`bool`, *optional*, defaults to `True`):
1092
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1093
- plain tuple.
1094
- callback (`Callable`, *optional*):
1095
- A function that will be called every `callback_steps` steps during inference. The function will be
1096
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
1097
- is_cancelled_callback (`Callable`, *optional*):
1098
- A function that will be called every `callback_steps` steps during inference. If the function returns
1099
- `True`, the inference will be cancelled.
1100
- callback_steps (`int`, *optional*, defaults to 1):
1101
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1102
- called at every step.
1103
- cross_attention_kwargs (`dict`, *optional*):
1104
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1105
- `self.processor` in
1106
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1107
-
1108
- Returns:
1109
- `None` if cancelled by `is_cancelled_callback`,
1110
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1111
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1112
- When returning a tuple, the first element is a list with the generated images, and the second element is a
1113
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1114
- (nsfw) content, according to the `safety_checker`.
1115
- """
1116
- return self.__call__(
1117
- prompt=prompt,
1118
- negative_prompt=negative_prompt,
1119
- height=height,
1120
- width=width,
1121
- num_inference_steps=num_inference_steps,
1122
- guidance_scale=guidance_scale,
1123
- num_images_per_prompt=num_images_per_prompt,
1124
- eta=eta,
1125
- generator=generator,
1126
- latents=latents,
1127
- prompt_embeds=prompt_embeds,
1128
- negative_prompt_embeds=negative_prompt_embeds,
1129
- max_embeddings_multiples=max_embeddings_multiples,
1130
- output_type=output_type,
1131
- return_dict=return_dict,
1132
- callback=callback,
1133
- is_cancelled_callback=is_cancelled_callback,
1134
- callback_steps=callback_steps,
1135
- cross_attention_kwargs=cross_attention_kwargs,
1136
- )
1137
-
1138
- def img2img(
1139
- self,
1140
- image: Union[torch.Tensor, PIL.Image.Image],
1141
- prompt: Union[str, List[str]],
1142
- negative_prompt: Optional[Union[str, List[str]]] = None,
1143
- strength: float = 0.8,
1144
- num_inference_steps: Optional[int] = 50,
1145
- guidance_scale: Optional[float] = 7.5,
1146
- num_images_per_prompt: Optional[int] = 1,
1147
- eta: Optional[float] = 0.0,
1148
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1149
- prompt_embeds: Optional[torch.Tensor] = None,
1150
- negative_prompt_embeds: Optional[torch.Tensor] = None,
1151
- max_embeddings_multiples: Optional[int] = 3,
1152
- output_type: Optional[str] = "pil",
1153
- return_dict: bool = True,
1154
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
1155
- is_cancelled_callback: Optional[Callable[[], bool]] = None,
1156
- callback_steps: int = 1,
1157
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1158
- ):
1159
- r"""
1160
- Function for image-to-image generation.
1161
- Args:
1162
- image (`torch.Tensor` or `PIL.Image.Image`):
1163
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
1164
- process.
1165
- prompt (`str` or `List[str]`):
1166
- The prompt or prompts to guide the image generation.
1167
- negative_prompt (`str` or `List[str]`, *optional*):
1168
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1169
- if `guidance_scale` is less than `1`).
1170
- strength (`float`, *optional*, defaults to 0.8):
1171
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
1172
- `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
1173
- number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
1174
- noise will be maximum and the denoising process will run for the full number of iterations specified in
1175
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
1176
- num_inference_steps (`int`, *optional*, defaults to 50):
1177
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1178
- expense of slower inference. This parameter will be modulated by `strength`.
1179
- guidance_scale (`float`, *optional*, defaults to 7.5):
1180
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1181
- `guidance_scale` is defined as `w` of equation 2 of [Imagen
1182
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1183
- 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
1184
- usually at the expense of lower image quality.
1185
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1186
- The number of images to generate per prompt.
1187
- eta (`float`, *optional*, defaults to 0.0):
1188
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1189
- [`schedulers.DDIMScheduler`], will be ignored for others.
1190
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1191
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1192
- to make generation deterministic.
1193
- prompt_embeds (`torch.Tensor`, *optional*):
1194
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1195
- provided, text embeddings will be generated from `prompt` input argument.
1196
- negative_prompt_embeds (`torch.Tensor`, *optional*):
1197
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1198
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1199
- argument.
1200
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1201
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
1202
- output_type (`str`, *optional*, defaults to `"pil"`):
1203
- The output format of the generated image. Choose between
1204
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1205
- return_dict (`bool`, *optional*, defaults to `True`):
1206
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1207
- plain tuple.
1208
- callback (`Callable`, *optional*):
1209
- A function that will be called every `callback_steps` steps during inference. The function will be
1210
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
1211
- is_cancelled_callback (`Callable`, *optional*):
1212
- A function that will be called every `callback_steps` steps during inference. If the function returns
1213
- `True`, the inference will be cancelled.
1214
- callback_steps (`int`, *optional*, defaults to 1):
1215
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1216
- called at every step.
1217
- cross_attention_kwargs (`dict`, *optional*):
1218
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1219
- `self.processor` in
1220
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1221
-
1222
- Returns:
1223
- `None` if cancelled by `is_cancelled_callback`,
1224
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1225
- When returning a tuple, the first element is a list with the generated images, and the second element is a
1226
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1227
- (nsfw) content, according to the `safety_checker`.
1228
- """
1229
- return self.__call__(
1230
- prompt=prompt,
1231
- negative_prompt=negative_prompt,
1232
- image=image,
1233
- num_inference_steps=num_inference_steps,
1234
- guidance_scale=guidance_scale,
1235
- strength=strength,
1236
- num_images_per_prompt=num_images_per_prompt,
1237
- eta=eta,
1238
- generator=generator,
1239
- prompt_embeds=prompt_embeds,
1240
- negative_prompt_embeds=negative_prompt_embeds,
1241
- max_embeddings_multiples=max_embeddings_multiples,
1242
- output_type=output_type,
1243
- return_dict=return_dict,
1244
- callback=callback,
1245
- is_cancelled_callback=is_cancelled_callback,
1246
- callback_steps=callback_steps,
1247
- cross_attention_kwargs=cross_attention_kwargs,
1248
- )
1249
-
1250
- def inpaint(
1251
- self,
1252
- image: Union[torch.Tensor, PIL.Image.Image],
1253
- mask_image: Union[torch.Tensor, PIL.Image.Image],
1254
- prompt: Union[str, List[str]],
1255
- negative_prompt: Optional[Union[str, List[str]]] = None,
1256
- strength: float = 0.8,
1257
- num_inference_steps: Optional[int] = 50,
1258
- guidance_scale: Optional[float] = 7.5,
1259
- num_images_per_prompt: Optional[int] = 1,
1260
- add_predicted_noise: Optional[bool] = False,
1261
- eta: Optional[float] = 0.0,
1262
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1263
- prompt_embeds: Optional[torch.Tensor] = None,
1264
- negative_prompt_embeds: Optional[torch.Tensor] = None,
1265
- max_embeddings_multiples: Optional[int] = 3,
1266
- output_type: Optional[str] = "pil",
1267
- return_dict: bool = True,
1268
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
1269
- is_cancelled_callback: Optional[Callable[[], bool]] = None,
1270
- callback_steps: int = 1,
1271
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1272
- ):
1273
- r"""
1274
- Function for inpainting.
1275
- Args:
1276
- image (`torch.Tensor` or `PIL.Image.Image`):
1277
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
1278
- process. This is the image whose masked region will be inpainted.
1279
- mask_image (`torch.Tensor` or `PIL.Image.Image`):
1280
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1281
- replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
1282
- PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
1283
- contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
1284
- prompt (`str` or `List[str]`):
1285
- The prompt or prompts to guide the image generation.
1286
- negative_prompt (`str` or `List[str]`, *optional*):
1287
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1288
- if `guidance_scale` is less than `1`).
1289
- strength (`float`, *optional*, defaults to 0.8):
1290
- Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
1291
- is 1, the denoising process will be run on the masked area for the full number of iterations specified
1292
- in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
1293
- noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
1294
- num_inference_steps (`int`, *optional*, defaults to 50):
1295
- The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
1296
- the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
1297
- guidance_scale (`float`, *optional*, defaults to 7.5):
1298
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1299
- `guidance_scale` is defined as `w` of equation 2 of [Imagen
1300
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1301
- 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
1302
- usually at the expense of lower image quality.
1303
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1304
- The number of images to generate per prompt.
1305
- add_predicted_noise (`bool`, *optional*, defaults to False):
1306
- Use predicted noise instead of random noise when constructing noisy versions of the original image in
1307
- the reverse diffusion process
1308
- eta (`float`, *optional*, defaults to 0.0):
1309
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1310
- [`schedulers.DDIMScheduler`], will be ignored for others.
1311
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1312
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1313
- to make generation deterministic.
1314
- prompt_embeds (`torch.Tensor`, *optional*):
1315
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1316
- provided, text embeddings will be generated from `prompt` input argument.
1317
- negative_prompt_embeds (`torch.Tensor`, *optional*):
1318
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1319
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1320
- argument.
1321
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1322
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
1323
- output_type (`str`, *optional*, defaults to `"pil"`):
1324
- The output format of the generated image. Choose between
1325
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1326
- return_dict (`bool`, *optional*, defaults to `True`):
1327
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1328
- plain tuple.
1329
- callback (`Callable`, *optional*):
1330
- A function that will be called every `callback_steps` steps during inference. The function will be
1331
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
1332
- is_cancelled_callback (`Callable`, *optional*):
1333
- A function that will be called every `callback_steps` steps during inference. If the function returns
1334
- `True`, the inference will be cancelled.
1335
- callback_steps (`int`, *optional*, defaults to 1):
1336
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1337
- called at every step.
1338
- cross_attention_kwargs (`dict`, *optional*):
1339
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1340
- `self.processor` in
1341
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1342
-
1343
- Returns:
1344
- `None` if cancelled by `is_cancelled_callback`,
1345
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1346
- When returning a tuple, the first element is a list with the generated images, and the second element is a
1347
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1348
- (nsfw) content, according to the `safety_checker`.
1349
- """
1350
- return self.__call__(
1351
- prompt=prompt,
1352
- negative_prompt=negative_prompt,
1353
- image=image,
1354
- mask_image=mask_image,
1355
- num_inference_steps=num_inference_steps,
1356
- guidance_scale=guidance_scale,
1357
- strength=strength,
1358
- num_images_per_prompt=num_images_per_prompt,
1359
- add_predicted_noise=add_predicted_noise,
1360
- eta=eta,
1361
- generator=generator,
1362
- prompt_embeds=prompt_embeds,
1363
- negative_prompt_embeds=negative_prompt_embeds,
1364
- max_embeddings_multiples=max_embeddings_multiples,
1365
- output_type=output_type,
1366
- return_dict=return_dict,
1367
- callback=callback,
1368
- is_cancelled_callback=is_cancelled_callback,
1369
- callback_steps=callback_steps,
1370
- cross_attention_kwargs=cross_attention_kwargs,
1371
- )
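The `text2img`, `img2img`, and `inpaint` wrappers above are thin forwards to the pipeline's `__call__`. A minimal usage sketch, assuming the community pipeline is loaded via `custom_pipeline="lpw_stable_diffusion"` and that a Stable Diffusion 1.x checkpoint id such as `runwayml/stable-diffusion-v1-5` is available (both are assumptions for illustration):

import torch
from diffusers import DiffusionPipeline

# Assumed checkpoint id and custom_pipeline name; a sketch, not the only way to load it.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# "(word:1.3)" raises the weight of that span; "[word]" lowers it by a factor of 1/1.1.
prompt = "(masterpiece:1.2), best quality, a photo of a cat wearing a tiny wizard hat"
image = pipe.text2img(
    prompt,
    negative_prompt="lowres, (bad anatomy:1.3), blurry",
    height=512,
    width=512,
    num_inference_steps=30,
    guidance_scale=7.5,
    max_embeddings_multiples=3,  # allow prompts up to ~3x the 77-token CLIP window
).images[0]
image.save("cat_wizard.png")

The same loaded pipeline exposes `img2img(image=..., prompt=...)` and `inpaint(image=..., mask_image=..., prompt=...)` with the argument semantics documented above.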
 
 
lpw_stable_diffusion_onnx.py DELETED
@@ -1,1148 +0,0 @@
1
- import inspect
2
- import re
3
- from typing import Callable, List, Optional, Union
4
-
5
- import numpy as np
6
- import PIL.Image
7
- import torch
8
- from packaging import version
9
- from transformers import CLIPImageProcessor, CLIPTokenizer
10
-
11
- import diffusers
12
- from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin
13
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
14
- from diffusers.utils import logging
15
-
16
-
17
- try:
18
- from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE
19
- except ImportError:
20
- ORT_TO_NP_TYPE = {
21
- "tensor(bool)": np.bool_,
22
- "tensor(int8)": np.int8,
23
- "tensor(uint8)": np.uint8,
24
- "tensor(int16)": np.int16,
25
- "tensor(uint16)": np.uint16,
26
- "tensor(int32)": np.int32,
27
- "tensor(uint32)": np.uint32,
28
- "tensor(int64)": np.int64,
29
- "tensor(uint64)": np.uint64,
30
- "tensor(float16)": np.float16,
31
- "tensor(float)": np.float32,
32
- "tensor(double)": np.float64,
33
- }
34
-
35
- try:
36
- from diffusers.utils import PIL_INTERPOLATION
37
- except ImportError:
38
- if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
39
- PIL_INTERPOLATION = {
40
- "linear": PIL.Image.Resampling.BILINEAR,
41
- "bilinear": PIL.Image.Resampling.BILINEAR,
42
- "bicubic": PIL.Image.Resampling.BICUBIC,
43
- "lanczos": PIL.Image.Resampling.LANCZOS,
44
- "nearest": PIL.Image.Resampling.NEAREST,
45
- }
46
- else:
47
- PIL_INTERPOLATION = {
48
- "linear": PIL.Image.LINEAR,
49
- "bilinear": PIL.Image.BILINEAR,
50
- "bicubic": PIL.Image.BICUBIC,
51
- "lanczos": PIL.Image.LANCZOS,
52
- "nearest": PIL.Image.NEAREST,
53
- }
54
- # ------------------------------------------------------------------------------
55
-
56
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
57
-
58
- re_attention = re.compile(
59
- r"""
60
- \\\(|
61
- \\\)|
62
- \\\[|
63
- \\]|
64
- \\\\|
65
- \\|
66
- \(|
67
- \[|
68
- :([+-]?[.\d]+)\)|
69
- \)|
70
- ]|
71
- [^\\()\[\]:]+|
72
- :
73
- """,
74
- re.X,
75
- )
76
-
77
-
78
- def parse_prompt_attention(text):
79
- """
80
- Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
81
- Accepted tokens are:
82
- (abc) - increases attention to abc by a multiplier of 1.1
83
- (abc:3.12) - increases attention to abc by a multiplier of 3.12
84
- [abc] - decreases attention to abc by a multiplier of 1.1
85
- \\( - literal character '('
86
- \\[ - literal character '['
87
- \\) - literal character ')'
88
- \\] - literal character ']'
89
- \\ - literal character '\'
90
- anything else - just text
91
- >>> parse_prompt_attention('normal text')
92
- [['normal text', 1.0]]
93
- >>> parse_prompt_attention('an (important) word')
94
- [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
95
- >>> parse_prompt_attention('(unbalanced')
96
- [['unbalanced', 1.1]]
97
- >>> parse_prompt_attention('\\(literal\\]')
98
- [['(literal]', 1.0]]
99
- >>> parse_prompt_attention('(unnecessary)(parens)')
100
- [['unnecessaryparens', 1.1]]
101
- >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
102
- [['a ', 1.0],
103
- ['house', 1.5730000000000004],
104
- [' ', 1.1],
105
- ['on', 1.0],
106
- [' a ', 1.1],
107
- ['hill', 0.55],
108
- [', sun, ', 1.1],
109
- ['sky', 1.4641000000000006],
110
- ['.', 1.1]]
111
- """
112
-
113
- res = []
114
- round_brackets = []
115
- square_brackets = []
116
-
117
- round_bracket_multiplier = 1.1
118
- square_bracket_multiplier = 1 / 1.1
119
-
120
- def multiply_range(start_position, multiplier):
121
- for p in range(start_position, len(res)):
122
- res[p][1] *= multiplier
123
-
124
- for m in re_attention.finditer(text):
125
- text = m.group(0)
126
- weight = m.group(1)
127
-
128
- if text.startswith("\\"):
129
- res.append([text[1:], 1.0])
130
- elif text == "(":
131
- round_brackets.append(len(res))
132
- elif text == "[":
133
- square_brackets.append(len(res))
134
- elif weight is not None and len(round_brackets) > 0:
135
- multiply_range(round_brackets.pop(), float(weight))
136
- elif text == ")" and len(round_brackets) > 0:
137
- multiply_range(round_brackets.pop(), round_bracket_multiplier)
138
- elif text == "]" and len(square_brackets) > 0:
139
- multiply_range(square_brackets.pop(), square_bracket_multiplier)
140
- else:
141
- res.append([text, 1.0])
142
-
143
- for pos in round_brackets:
144
- multiply_range(pos, round_bracket_multiplier)
145
-
146
- for pos in square_brackets:
147
- multiply_range(pos, square_bracket_multiplier)
148
-
149
- if len(res) == 0:
150
- res = [["", 1.0]]
151
-
152
- # merge runs of identical weights
153
- i = 0
154
- while i + 1 < len(res):
155
- if res[i][1] == res[i + 1][1]:
156
- res[i][0] += res[i + 1][0]
157
- res.pop(i + 1)
158
- else:
159
- i += 1
160
-
161
- return res
162
-
163
-
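A small illustration of how the bracket weights compose, assuming this file is importable as a module named `lpw_stable_diffusion_onnx` (the import path is an assumption): nested `(...)` multiply the enclosed span by 1.1 per level, `[...]` divide by 1.1, and an explicit `(text:weight)` sets the multiplier directly.

from lpw_stable_diffusion_onnx import parse_prompt_attention  # assumed module name

# Two levels of round brackets compound to 1.1 * 1.1 ~= 1.21; square brackets give 1/1.1 ~= 0.909.
print(parse_prompt_attention("a ((house)) on a [hill]"))
# [['a ', 1.0], ['house', 1.21...], [' on a ', 1.0], ['hill', 0.909...]]

# An explicit ":1.5" overrides the implicit 1.1 for its bracket.
print(parse_prompt_attention("a (house:1.5) on a hill"))
# [['a ', 1.0], ['house', 1.5], [' on a hill', 1.0]]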
164
- def get_prompts_with_weights(pipe, prompt: List[str], max_length: int):
165
- r"""
166
- Tokenize a list of prompts and return its tokens with weights of each token.
167
-
168
- No padding, starting or ending token is included.
169
- """
170
- tokens = []
171
- weights = []
172
- truncated = False
173
- for text in prompt:
174
- texts_and_weights = parse_prompt_attention(text)
175
- text_token = []
176
- text_weight = []
177
- for word, weight in texts_and_weights:
178
- # tokenize and discard the starting and the ending token
179
- token = pipe.tokenizer(word, return_tensors="np").input_ids[0, 1:-1]
180
- text_token += list(token)
181
- # copy the weight by length of token
182
- text_weight += [weight] * len(token)
183
- # stop if the text is too long (longer than truncation limit)
184
- if len(text_token) > max_length:
185
- truncated = True
186
- break
187
- # truncate
188
- if len(text_token) > max_length:
189
- truncated = True
190
- text_token = text_token[:max_length]
191
- text_weight = text_weight[:max_length]
192
- tokens.append(text_token)
193
- weights.append(text_weight)
194
- if truncated:
195
- logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
196
- return tokens, weights
197
-
198
-
199
- def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
200
- r"""
201
- Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
202
- """
203
- max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
204
- weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
205
- for i in range(len(tokens)):
206
- tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
207
- if no_boseos_middle:
208
- weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
209
- else:
210
- w = []
211
- if len(weights[i]) == 0:
212
- w = [1.0] * weights_length
213
- else:
214
- for j in range(max_embeddings_multiples):
215
- w.append(1.0) # weight for starting token in this chunk
216
- w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
217
- w.append(1.0) # weight for ending token in this chunk
218
- w += [1.0] * (weights_length - len(w))
219
- weights[i] = w[:]
220
-
221
- return tokens, weights
222
-
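The padding above reserves two slots per chunk for the BOS/EOS tokens, so only `chunk_length - 2` content tokens fit in each CLIP window. A quick sanity check of that arithmetic (plain Python, no pipeline required):

chunk_length = 77            # CLIP's model_max_length
max_embeddings_multiples = 3

# Padded length used by pad_tokens_and_weights / get_weighted_text_embeddings:
max_length = (chunk_length - 2) * max_embeddings_multiples + 2
print(max_length)            # 227

# Conversely, a prompt with 150 content tokens needs two chunks:
n_content_tokens = 150
print((n_content_tokens - 1) // (chunk_length - 2) + 1)  # 2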
223
-
224
- def get_unweighted_text_embeddings(
225
- pipe,
226
- text_input: np.array,
227
- chunk_length: int,
228
- no_boseos_middle: Optional[bool] = True,
229
- ):
230
- """
231
- When the tokenized prompt is longer than the capacity of the text encoder,
232
- it is split into chunks and each chunk is sent to the text encoder individually.
233
- """
234
- max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
235
- if max_embeddings_multiples > 1:
236
- text_embeddings = []
237
- for i in range(max_embeddings_multiples):
238
- # extract the i-th chunk
239
- text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].copy()
240
-
241
- # cover the head and the tail with the starting and ending tokens
242
- text_input_chunk[:, 0] = text_input[0, 0]
243
- text_input_chunk[:, -1] = text_input[0, -1]
244
-
245
- text_embedding = pipe.text_encoder(input_ids=text_input_chunk)[0]
246
-
247
- if no_boseos_middle:
248
- if i == 0:
249
- # discard the ending token
250
- text_embedding = text_embedding[:, :-1]
251
- elif i == max_embeddings_multiples - 1:
252
- # discard the starting token
253
- text_embedding = text_embedding[:, 1:]
254
- else:
255
- # discard both starting and ending tokens
256
- text_embedding = text_embedding[:, 1:-1]
257
-
258
- text_embeddings.append(text_embedding)
259
- text_embeddings = np.concatenate(text_embeddings, axis=1)
260
- else:
261
- text_embeddings = pipe.text_encoder(input_ids=text_input)[0]
262
- return text_embeddings
263
-
264
-
265
- def get_weighted_text_embeddings(
266
- pipe,
267
- prompt: Union[str, List[str]],
268
- uncond_prompt: Optional[Union[str, List[str]]] = None,
269
- max_embeddings_multiples: Optional[int] = 4,
270
- no_boseos_middle: Optional[bool] = False,
271
- skip_parsing: Optional[bool] = False,
272
- skip_weighting: Optional[bool] = False,
273
- **kwargs,
274
- ):
275
- r"""
276
- Prompts can be assigned local weights using brackets. For example,
277
- prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
278
- and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
279
-
280
- Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
281
-
282
- Args:
283
- pipe (`OnnxStableDiffusionPipeline`):
284
- Pipe to provide access to the tokenizer and the text encoder.
285
- prompt (`str` or `List[str]`):
286
- The prompt or prompts to guide the image generation.
287
- uncond_prompt (`str` or `List[str]`):
288
- The unconditional prompt or prompts for guide the image generation. If unconditional prompt
289
- is provided, the embeddings of prompt and uncond_prompt are concatenated.
290
- max_embeddings_multiples (`int`, *optional*, defaults to `4`):
291
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
292
- no_boseos_middle (`bool`, *optional*, defaults to `False`):
293
- If the length of the text tokens is a multiple of the text encoder capacity, whether to keep the starting and
294
- ending tokens of each chunk in the middle.
295
- skip_parsing (`bool`, *optional*, defaults to `False`):
296
- Skip the parsing of brackets.
297
- skip_weighting (`bool`, *optional*, defaults to `False`):
298
- Skip the weighting. When parsing is skipped, this is forced to `True`.
299
- """
300
- max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
301
- if isinstance(prompt, str):
302
- prompt = [prompt]
303
-
304
- if not skip_parsing:
305
- prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
306
- if uncond_prompt is not None:
307
- if isinstance(uncond_prompt, str):
308
- uncond_prompt = [uncond_prompt]
309
- uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
310
- else:
311
- prompt_tokens = [
312
- token[1:-1]
313
- for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True, return_tensors="np").input_ids
314
- ]
315
- prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
316
- if uncond_prompt is not None:
317
- if isinstance(uncond_prompt, str):
318
- uncond_prompt = [uncond_prompt]
319
- uncond_tokens = [
320
- token[1:-1]
321
- for token in pipe.tokenizer(
322
- uncond_prompt,
323
- max_length=max_length,
324
- truncation=True,
325
- return_tensors="np",
326
- ).input_ids
327
- ]
328
- uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
329
-
330
- # round up the longest length of tokens to a multiple of (model_max_length - 2)
331
- max_length = max([len(token) for token in prompt_tokens])
332
- if uncond_prompt is not None:
333
- max_length = max(max_length, max([len(token) for token in uncond_tokens]))
334
-
335
- max_embeddings_multiples = min(
336
- max_embeddings_multiples,
337
- (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
338
- )
339
- max_embeddings_multiples = max(1, max_embeddings_multiples)
340
- max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
341
-
342
- # pad the length of tokens and weights
343
- bos = pipe.tokenizer.bos_token_id
344
- eos = pipe.tokenizer.eos_token_id
345
- pad = getattr(pipe.tokenizer, "pad_token_id", eos)
346
- prompt_tokens, prompt_weights = pad_tokens_and_weights(
347
- prompt_tokens,
348
- prompt_weights,
349
- max_length,
350
- bos,
351
- eos,
352
- pad,
353
- no_boseos_middle=no_boseos_middle,
354
- chunk_length=pipe.tokenizer.model_max_length,
355
- )
356
- prompt_tokens = np.array(prompt_tokens, dtype=np.int32)
357
- if uncond_prompt is not None:
358
- uncond_tokens, uncond_weights = pad_tokens_and_weights(
359
- uncond_tokens,
360
- uncond_weights,
361
- max_length,
362
- bos,
363
- eos,
364
- pad,
365
- no_boseos_middle=no_boseos_middle,
366
- chunk_length=pipe.tokenizer.model_max_length,
367
- )
368
- uncond_tokens = np.array(uncond_tokens, dtype=np.int32)
369
-
370
- # get the embeddings
371
- text_embeddings = get_unweighted_text_embeddings(
372
- pipe,
373
- prompt_tokens,
374
- pipe.tokenizer.model_max_length,
375
- no_boseos_middle=no_boseos_middle,
376
- )
377
- prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)
378
- if uncond_prompt is not None:
379
- uncond_embeddings = get_unweighted_text_embeddings(
380
- pipe,
381
- uncond_tokens,
382
- pipe.tokenizer.model_max_length,
383
- no_boseos_middle=no_boseos_middle,
384
- )
385
- uncond_weights = np.array(uncond_weights, dtype=uncond_embeddings.dtype)
386
-
387
- # assign weights to the prompts and normalize in the sense of mean
388
- # TODO: should we normalize by chunk or in a whole (current implementation)?
389
- if (not skip_parsing) and (not skip_weighting):
390
- previous_mean = text_embeddings.mean(axis=(-2, -1))
391
- text_embeddings *= prompt_weights[:, :, None]
392
- text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]
393
- if uncond_prompt is not None:
394
- previous_mean = uncond_embeddings.mean(axis=(-2, -1))
395
- uncond_embeddings *= uncond_weights[:, :, None]
396
- uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=(-2, -1)))[:, None, None]
397
-
398
- # For classifier free guidance, we need to do two forward passes.
399
- # Here we concatenate the unconditional and text embeddings into a single batch
400
- # to avoid doing two forward passes
401
- if uncond_prompt is not None:
402
- return text_embeddings, uncond_embeddings
403
-
404
- return text_embeddings
405
-
406
-
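The weighting step at the end of `get_weighted_text_embeddings` multiplies each token embedding by its weight and then rescales the result so the per-prompt mean is unchanged. That rescaling can be reproduced in isolation with NumPy (random data, purely illustrative):

import numpy as np

rng = np.random.default_rng(0)
embeddings = rng.normal(size=(1, 77, 768)).astype(np.float32)  # (batch, tokens, dim)
weights = np.ones((1, 77), dtype=np.float32)
weights[0, 5:10] = 1.1  # pretend tokens 5..9 sat inside "(...)"

previous_mean = embeddings.mean(axis=(-2, -1))
weighted = embeddings * weights[:, :, None]
weighted *= (previous_mean / weighted.mean(axis=(-2, -1)))[:, None, None]

# The per-sample mean is preserved up to floating-point error.
assert np.allclose(weighted.mean(axis=(-2, -1)), previous_mean, atol=1e-5)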
407
- def preprocess_image(image):
408
- w, h = image.size
409
- w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
410
- image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
411
- image = np.array(image).astype(np.float32) / 255.0
412
- image = image[None].transpose(0, 3, 1, 2)
413
- return 2.0 * image - 1.0
414
-
415
-
416
- def preprocess_mask(mask, scale_factor=8):
417
- mask = mask.convert("L")
418
- w, h = mask.size
419
- w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
420
- mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
421
- mask = np.array(mask).astype(np.float32) / 255.0
422
- mask = np.tile(mask, (4, 1, 1))
423
- mask = mask[None].transpose(0, 1, 2, 3) # add a batch dimension (this transpose is an identity permutation, i.e. a no-op)
424
- mask = 1 - mask # repaint white, keep black
425
- return mask
426
-
427
-
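A short sketch of what the two preprocessing helpers return; the file names are placeholders and the module import path is an assumption:

import PIL.Image
from lpw_stable_diffusion_onnx import preprocess_image, preprocess_mask  # assumed module name

init = preprocess_image(PIL.Image.open("input.png").convert("RGB"))
mask = preprocess_mask(PIL.Image.open("mask.png"), scale_factor=8)

# init: float32 NCHW array in [-1, 1], height/width rounded down to a multiple of 32.
print(init.shape, float(init.min()), float(init.max()))
# mask: (1, 4, H/8, W/8); white (repaint) pixels become 0 after the `1 - mask` inversion.
print(mask.shape)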
428
- class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline):
429
- r"""
430
- Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for parsing
431
- weights in the prompt.
432
-
433
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
434
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
435
- """
436
-
437
- if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"):
438
-
439
- def __init__(
440
- self,
441
- vae_encoder: OnnxRuntimeModel,
442
- vae_decoder: OnnxRuntimeModel,
443
- text_encoder: OnnxRuntimeModel,
444
- tokenizer: CLIPTokenizer,
445
- unet: OnnxRuntimeModel,
446
- scheduler: SchedulerMixin,
447
- safety_checker: OnnxRuntimeModel,
448
- feature_extractor: CLIPImageProcessor,
449
- requires_safety_checker: bool = True,
450
- ):
451
- super().__init__(
452
- vae_encoder=vae_encoder,
453
- vae_decoder=vae_decoder,
454
- text_encoder=text_encoder,
455
- tokenizer=tokenizer,
456
- unet=unet,
457
- scheduler=scheduler,
458
- safety_checker=safety_checker,
459
- feature_extractor=feature_extractor,
460
- requires_safety_checker=requires_safety_checker,
461
- )
462
- self.__init__additional__()
463
-
464
- else:
465
-
466
- def __init__(
467
- self,
468
- vae_encoder: OnnxRuntimeModel,
469
- vae_decoder: OnnxRuntimeModel,
470
- text_encoder: OnnxRuntimeModel,
471
- tokenizer: CLIPTokenizer,
472
- unet: OnnxRuntimeModel,
473
- scheduler: SchedulerMixin,
474
- safety_checker: OnnxRuntimeModel,
475
- feature_extractor: CLIPImageProcessor,
476
- ):
477
- super().__init__(
478
- vae_encoder=vae_encoder,
479
- vae_decoder=vae_decoder,
480
- text_encoder=text_encoder,
481
- tokenizer=tokenizer,
482
- unet=unet,
483
- scheduler=scheduler,
484
- safety_checker=safety_checker,
485
- feature_extractor=feature_extractor,
486
- )
487
- self.__init__additional__()
488
-
489
- def __init__additional__(self):
490
- self.unet.config.in_channels = 4
491
- self.vae_scale_factor = 8
492
-
493
- def _encode_prompt(
494
- self,
495
- prompt,
496
- num_images_per_prompt,
497
- do_classifier_free_guidance,
498
- negative_prompt,
499
- max_embeddings_multiples,
500
- ):
501
- r"""
502
- Encodes the prompt into text encoder hidden states.
503
-
504
- Args:
505
- prompt (`str` or `list(int)`):
506
- prompt to be encoded
507
- num_images_per_prompt (`int`):
508
- number of images that should be generated per prompt
509
- do_classifier_free_guidance (`bool`):
510
- whether to use classifier free guidance or not
511
- negative_prompt (`str` or `List[str]`):
512
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
513
- if `guidance_scale` is less than `1`).
514
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
515
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
516
- """
517
- batch_size = len(prompt) if isinstance(prompt, list) else 1
518
-
519
- if negative_prompt is None:
520
- negative_prompt = [""] * batch_size
521
- elif isinstance(negative_prompt, str):
522
- negative_prompt = [negative_prompt] * batch_size
523
- if batch_size != len(negative_prompt):
524
- raise ValueError(
525
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
526
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
527
- " the batch size of `prompt`."
528
- )
529
-
530
- text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
531
- pipe=self,
532
- prompt=prompt,
533
- uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
534
- max_embeddings_multiples=max_embeddings_multiples,
535
- )
536
-
537
- text_embeddings = text_embeddings.repeat(num_images_per_prompt, 0)
538
- if do_classifier_free_guidance:
539
- uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 0)
540
- text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
541
-
542
- return text_embeddings
543
-
544
- def check_inputs(self, prompt, height, width, strength, callback_steps):
545
- if not isinstance(prompt, str) and not isinstance(prompt, list):
546
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
547
-
548
- if strength < 0 or strength > 1:
549
- raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
550
-
551
- if height % 8 != 0 or width % 8 != 0:
552
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
553
-
554
- if (callback_steps is None) or (
555
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
556
- ):
557
- raise ValueError(
558
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
559
- f" {type(callback_steps)}."
560
- )
561
-
562
- def get_timesteps(self, num_inference_steps, strength, is_text2img):
563
- if is_text2img:
564
- return self.scheduler.timesteps, num_inference_steps
565
- else:
566
- # get the original timestep using init_timestep
567
- offset = self.scheduler.config.get("steps_offset", 0)
568
- init_timestep = int(num_inference_steps * strength) + offset
569
- init_timestep = min(init_timestep, num_inference_steps)
570
-
571
- t_start = max(num_inference_steps - init_timestep + offset, 0)
572
- timesteps = self.scheduler.timesteps[t_start:]
573
- return timesteps, num_inference_steps - t_start
574
-
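A concrete instance of the `strength` bookkeeping in `get_timesteps`, assuming `steps_offset` is 0: with 50 inference steps and `strength=0.8`, denoising starts 10 steps into the schedule and runs for the remaining 40.

num_inference_steps, strength, offset = 50, 0.8, 0

init_timestep = min(int(num_inference_steps * strength) + offset, num_inference_steps)
t_start = max(num_inference_steps - init_timestep + offset, 0)
print(init_timestep, t_start, num_inference_steps - t_start)  # 40 10 40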
575
- def run_safety_checker(self, image):
576
- if self.safety_checker is not None:
577
- safety_checker_input = self.feature_extractor(
578
- self.numpy_to_pil(image), return_tensors="np"
579
- ).pixel_values.astype(image.dtype)
580
- # The safety_checker throws an error if used directly with batch size > 1, so run it one image at a time
581
- images, has_nsfw_concept = [], []
582
- for i in range(image.shape[0]):
583
- image_i, has_nsfw_concept_i = self.safety_checker(
584
- clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
585
- )
586
- images.append(image_i)
587
- has_nsfw_concept.append(has_nsfw_concept_i[0])
588
- image = np.concatenate(images)
589
- else:
590
- has_nsfw_concept = None
591
- return image, has_nsfw_concept
592
-
593
- def decode_latents(self, latents):
594
- latents = 1 / 0.18215 * latents
595
- # image = self.vae_decoder(latent_sample=latents)[0]
596
- # it seems like there is a strange result when using the half-precision vae decoder if batch size > 1
597
- image = np.concatenate(
598
- [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
599
- )
600
- image = np.clip(image / 2 + 0.5, 0, 1)
601
- image = image.transpose((0, 2, 3, 1))
602
- return image
603
-
604
- def prepare_extra_step_kwargs(self, generator, eta):
605
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
606
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
607
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
608
- # and should be between [0, 1]
609
-
610
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
611
- extra_step_kwargs = {}
612
- if accepts_eta:
613
- extra_step_kwargs["eta"] = eta
614
-
615
- # check if the scheduler accepts generator
616
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
617
- if accepts_generator:
618
- extra_step_kwargs["generator"] = generator
619
- return extra_step_kwargs
620
-
621
- def prepare_latents(self, image, timestep, batch_size, height, width, dtype, generator, latents=None):
622
- if image is None:
623
- shape = (
624
- batch_size,
625
- self.unet.config.in_channels,
626
- height // self.vae_scale_factor,
627
- width // self.vae_scale_factor,
628
- )
629
-
630
- if latents is None:
631
- latents = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
632
- else:
633
- if latents.shape != shape:
634
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
635
-
636
- # scale the initial noise by the standard deviation required by the scheduler
637
- latents = (torch.from_numpy(latents) * self.scheduler.init_noise_sigma).numpy()
638
- return latents, None, None
639
- else:
640
- init_latents = self.vae_encoder(sample=image)[0]
641
- init_latents = 0.18215 * init_latents
642
- init_latents = np.concatenate([init_latents] * batch_size, axis=0)
643
- init_latents_orig = init_latents
644
- shape = init_latents.shape
645
-
646
- # add noise to latents using the timesteps
647
- noise = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
648
- latents = self.scheduler.add_noise(
649
- torch.from_numpy(init_latents), torch.from_numpy(noise), timestep
650
- ).numpy()
651
- return latents, init_latents_orig, noise
652
-
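For reference, the text-to-image branch of `prepare_latents` samples noise with the usual SD 1.x latent layout (4 channels at 1/8 of the pixel resolution):

batch_size, in_channels, vae_scale_factor = 1, 4, 8
height = width = 512
latent_shape = (batch_size, in_channels, height // vae_scale_factor, width // vae_scale_factor)
print(latent_shape)  # (1, 4, 64, 64)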
653
- @torch.no_grad()
654
- def __call__(
655
- self,
656
- prompt: Union[str, List[str]],
657
- negative_prompt: Optional[Union[str, List[str]]] = None,
658
- image: Union[np.ndarray, PIL.Image.Image] = None,
659
- mask_image: Union[np.ndarray, PIL.Image.Image] = None,
660
- height: int = 512,
661
- width: int = 512,
662
- num_inference_steps: int = 50,
663
- guidance_scale: float = 7.5,
664
- strength: float = 0.8,
665
- num_images_per_prompt: Optional[int] = 1,
666
- eta: float = 0.0,
667
- generator: Optional[torch.Generator] = None,
668
- latents: Optional[np.ndarray] = None,
669
- max_embeddings_multiples: Optional[int] = 3,
670
- output_type: Optional[str] = "pil",
671
- return_dict: bool = True,
672
- callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
673
- is_cancelled_callback: Optional[Callable[[], bool]] = None,
674
- callback_steps: int = 1,
675
- **kwargs,
676
- ):
677
- r"""
678
- Function invoked when calling the pipeline for generation.
679
-
680
- Args:
681
- prompt (`str` or `List[str]`):
682
- The prompt or prompts to guide the image generation.
683
- negative_prompt (`str` or `List[str]`, *optional*):
684
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
685
- if `guidance_scale` is less than `1`).
686
- image (`np.ndarray` or `PIL.Image.Image`):
687
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
688
- process.
689
- mask_image (`np.ndarray` or `PIL.Image.Image`):
690
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
691
- replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
692
- PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
693
- contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
694
- height (`int`, *optional*, defaults to 512):
695
- The height in pixels of the generated image.
696
- width (`int`, *optional*, defaults to 512):
697
- The width in pixels of the generated image.
698
- num_inference_steps (`int`, *optional*, defaults to 50):
699
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
700
- expense of slower inference.
701
- guidance_scale (`float`, *optional*, defaults to 7.5):
702
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
703
- `guidance_scale` is defined as `w` of equation 2 of [Imagen
704
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
705
- 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
706
- usually at the expense of lower image quality.
707
- strength (`float`, *optional*, defaults to 0.8):
708
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
709
- `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
710
- number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
711
- noise will be maximum and the denoising process will run for the full number of iterations specified in
712
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
713
- num_images_per_prompt (`int`, *optional*, defaults to 1):
714
- The number of images to generate per prompt.
715
- eta (`float`, *optional*, defaults to 0.0):
716
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
717
- [`schedulers.DDIMScheduler`], will be ignored for others.
718
- generator (`torch.Generator`, *optional*):
719
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
720
- deterministic.
721
- latents (`np.ndarray`, *optional*):
722
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
723
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
724
- tensor will be generated by sampling using the supplied random `generator`.
725
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
726
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
727
- output_type (`str`, *optional*, defaults to `"pil"`):
728
- The output format of the generated image. Choose between
729
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
730
- return_dict (`bool`, *optional*, defaults to `True`):
731
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
732
- plain tuple.
733
- callback (`Callable`, *optional*):
734
- A function that will be called every `callback_steps` steps during inference. The function will be
735
- called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
736
- is_cancelled_callback (`Callable`, *optional*):
737
- A function that will be called every `callback_steps` steps during inference. If the function returns
738
- `True`, the inference will be cancelled.
739
- callback_steps (`int`, *optional*, defaults to 1):
740
- The frequency at which the `callback` function will be called. If not specified, the callback will be
741
- called at every step.
742
-
743
- Returns:
744
- `None` if cancelled by `is_cancelled_callback`,
745
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
746
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
747
- When returning a tuple, the first element is a list with the generated images, and the second element is a
748
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
749
- (nsfw) content, according to the `safety_checker`.
750
- """
751
- # 0. Default height and width to unet
752
- height = height or self.unet.config.sample_size * self.vae_scale_factor
753
- width = width or self.unet.config.sample_size * self.vae_scale_factor
754
-
755
- # 1. Check inputs. Raise error if not correct
756
- self.check_inputs(prompt, height, width, strength, callback_steps)
757
-
758
- # 2. Define call parameters
759
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
760
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
761
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
762
- # corresponds to doing no classifier free guidance.
763
- do_classifier_free_guidance = guidance_scale > 1.0
764
-
765
- # 3. Encode input prompt
766
- text_embeddings = self._encode_prompt(
767
- prompt,
768
- num_images_per_prompt,
769
- do_classifier_free_guidance,
770
- negative_prompt,
771
- max_embeddings_multiples,
772
- )
773
- dtype = text_embeddings.dtype
774
-
775
- # 4. Preprocess image and mask
776
- if isinstance(image, PIL.Image.Image):
777
- image = preprocess_image(image)
778
- if image is not None:
779
- image = image.astype(dtype)
780
- if isinstance(mask_image, PIL.Image.Image):
781
- mask_image = preprocess_mask(mask_image, self.vae_scale_factor)
782
- if mask_image is not None:
783
- mask = mask_image.astype(dtype)
784
- mask = np.concatenate([mask] * batch_size * num_images_per_prompt)
785
- else:
786
- mask = None
787
-
788
- # 5. set timesteps
789
- self.scheduler.set_timesteps(num_inference_steps)
790
- timestep_dtype = next(
791
- (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
792
- )
793
- timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
794
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, image is None)
795
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
796
-
797
- # 6. Prepare latent variables
798
- latents, init_latents_orig, noise = self.prepare_latents(
799
- image,
800
- latent_timestep,
801
- batch_size * num_images_per_prompt,
802
- height,
803
- width,
804
- dtype,
805
- generator,
806
- latents,
807
- )
808
-
809
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
810
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
811
-
812
- # 8. Denoising loop
813
- for i, t in enumerate(self.progress_bar(timesteps)):
814
- # expand the latents if we are doing classifier free guidance
815
- latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
816
- latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
817
- latent_model_input = latent_model_input.numpy()
818
-
819
- # predict the noise residual
820
- noise_pred = self.unet(
821
- sample=latent_model_input,
822
- timestep=np.array([t], dtype=timestep_dtype),
823
- encoder_hidden_states=text_embeddings,
824
- )
825
- noise_pred = noise_pred[0]
826
-
827
- # perform guidance
828
- if do_classifier_free_guidance:
829
- noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
830
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
831
-
832
- # compute the previous noisy sample x_t -> x_t-1
833
- scheduler_output = self.scheduler.step(
834
- torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
835
- )
836
- latents = scheduler_output.prev_sample.numpy()
837
-
838
- if mask is not None:
839
- # masking
840
- init_latents_proper = self.scheduler.add_noise(
841
- torch.from_numpy(init_latents_orig),
842
- torch.from_numpy(noise),
843
- t,
844
- ).numpy()
845
- latents = (init_latents_proper * mask) + (latents * (1 - mask))
846
-
847
- # call the callback, if provided
848
- if i % callback_steps == 0:
849
- if callback is not None:
850
- step_idx = i // getattr(self.scheduler, "order", 1)
851
- callback(step_idx, t, latents)
852
- if is_cancelled_callback is not None and is_cancelled_callback():
853
- return None
854
-
855
- # 9. Post-processing
856
- image = self.decode_latents(latents)
857
-
858
- # 10. Run safety checker
859
- image, has_nsfw_concept = self.run_safety_checker(image)
860
-
861
- # 11. Convert to PIL
862
- if output_type == "pil":
863
- image = self.numpy_to_pil(image)
864
-
865
- if not return_dict:
866
- return image, has_nsfw_concept
867
-
868
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
869
-
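A rough usage sketch for this ONNX variant: the checkpoint id, the `revision="onnx"` branch, and the execution provider are assumptions, and an ONNX export of Stable Diffusion plus the `onnxruntime` package are required.

from diffusers import DiffusionPipeline

# All identifiers below are assumptions for illustration.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion_onnx",
    revision="onnx",
    provider="CPUExecutionProvider",
)

result = pipe.text2img(
    "(masterpiece:1.2), a watercolor painting of a lighthouse at dusk",
    negative_prompt="lowres, blurry",
    num_inference_steps=25,
    guidance_scale=7.5,
)
result.images[0].save("lighthouse.png")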
870
- def text2img(
871
- self,
872
- prompt: Union[str, List[str]],
873
- negative_prompt: Optional[Union[str, List[str]]] = None,
874
- height: int = 512,
875
- width: int = 512,
876
- num_inference_steps: int = 50,
877
- guidance_scale: float = 7.5,
878
- num_images_per_prompt: Optional[int] = 1,
879
- eta: float = 0.0,
880
- generator: Optional[torch.Generator] = None,
881
- latents: Optional[np.ndarray] = None,
882
- max_embeddings_multiples: Optional[int] = 3,
883
- output_type: Optional[str] = "pil",
884
- return_dict: bool = True,
885
- callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
886
- callback_steps: int = 1,
887
- **kwargs,
888
- ):
889
- r"""
890
- Function for text-to-image generation.
891
- Args:
892
- prompt (`str` or `List[str]`):
893
- The prompt or prompts to guide the image generation.
894
- negative_prompt (`str` or `List[str]`, *optional*):
895
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
896
- if `guidance_scale` is less than `1`).
897
- height (`int`, *optional*, defaults to 512):
898
- The height in pixels of the generated image.
899
- width (`int`, *optional*, defaults to 512):
900
- The width in pixels of the generated image.
901
- num_inference_steps (`int`, *optional*, defaults to 50):
902
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
903
- expense of slower inference.
904
- guidance_scale (`float`, *optional*, defaults to 7.5):
905
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
906
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
907
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
908
- 	            1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
909
- usually at the expense of lower image quality.
910
- num_images_per_prompt (`int`, *optional*, defaults to 1):
911
- The number of images to generate per prompt.
912
- eta (`float`, *optional*, defaults to 0.0):
913
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
914
- [`schedulers.DDIMScheduler`], will be ignored for others.
915
- generator (`torch.Generator`, *optional*):
916
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
917
- deterministic.
918
- latents (`np.ndarray`, *optional*):
919
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
920
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
921
- 	            tensor will be generated by sampling using the supplied random `generator`.
922
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
923
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
924
- output_type (`str`, *optional*, defaults to `"pil"`):
925
- 	            The output format of the generated image. Choose between
926
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
927
- return_dict (`bool`, *optional*, defaults to `True`):
928
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
929
- plain tuple.
930
- callback (`Callable`, *optional*):
931
- A function that will be called every `callback_steps` steps during inference. The function will be
932
- called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
933
- callback_steps (`int`, *optional*, defaults to 1):
934
- The frequency at which the `callback` function will be called. If not specified, the callback will be
935
- called at every step.
936
- Returns:
937
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
938
- 	        [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
939
- When returning a tuple, the first element is a list with the generated images, and the second element is a
940
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
941
- (nsfw) content, according to the `safety_checker`.
942
- """
943
- return self.__call__(
944
- prompt=prompt,
945
- negative_prompt=negative_prompt,
946
- height=height,
947
- width=width,
948
- num_inference_steps=num_inference_steps,
949
- guidance_scale=guidance_scale,
950
- num_images_per_prompt=num_images_per_prompt,
951
- eta=eta,
952
- generator=generator,
953
- latents=latents,
954
- max_embeddings_multiples=max_embeddings_multiples,
955
- output_type=output_type,
956
- return_dict=return_dict,
957
- callback=callback,
958
- callback_steps=callback_steps,
959
- **kwargs,
960
- )
961
-
962
- def img2img(
963
- self,
964
- image: Union[np.ndarray, PIL.Image.Image],
965
- prompt: Union[str, List[str]],
966
- negative_prompt: Optional[Union[str, List[str]]] = None,
967
- strength: float = 0.8,
968
- num_inference_steps: Optional[int] = 50,
969
- guidance_scale: Optional[float] = 7.5,
970
- num_images_per_prompt: Optional[int] = 1,
971
- eta: Optional[float] = 0.0,
972
- generator: Optional[torch.Generator] = None,
973
- max_embeddings_multiples: Optional[int] = 3,
974
- output_type: Optional[str] = "pil",
975
- return_dict: bool = True,
976
- callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
977
- callback_steps: int = 1,
978
- **kwargs,
979
- ):
980
- r"""
981
- Function for image-to-image generation.
982
- Args:
983
- image (`np.ndarray` or `PIL.Image.Image`):
984
- `Image`, or ndarray representing an image batch, that will be used as the starting point for the
985
- process.
986
- prompt (`str` or `List[str]`):
987
- The prompt or prompts to guide the image generation.
988
- negative_prompt (`str` or `List[str]`, *optional*):
989
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
990
- if `guidance_scale` is less than `1`).
991
- strength (`float`, *optional*, defaults to 0.8):
992
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
993
- `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
994
- number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
995
- noise will be maximum and the denoising process will run for the full number of iterations specified in
996
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
997
- num_inference_steps (`int`, *optional*, defaults to 50):
998
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
999
- expense of slower inference. This parameter will be modulated by `strength`.
1000
- guidance_scale (`float`, *optional*, defaults to 7.5):
1001
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1002
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
1003
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1004
- 	            1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
1005
- usually at the expense of lower image quality.
1006
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1007
- The number of images to generate per prompt.
1008
- eta (`float`, *optional*, defaults to 0.0):
1009
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1010
- [`schedulers.DDIMScheduler`], will be ignored for others.
1011
- generator (`torch.Generator`, *optional*):
1012
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
1013
- deterministic.
1014
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1015
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
1016
- output_type (`str`, *optional*, defaults to `"pil"`):
1017
- 	            The output format of the generated image. Choose between
1018
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1019
- return_dict (`bool`, *optional*, defaults to `True`):
1020
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1021
- plain tuple.
1022
- callback (`Callable`, *optional*):
1023
- A function that will be called every `callback_steps` steps during inference. The function will be
1024
- called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
1025
- callback_steps (`int`, *optional*, defaults to 1):
1026
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1027
- called at every step.
1028
- Returns:
1029
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1030
- 	        [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1031
- When returning a tuple, the first element is a list with the generated images, and the second element is a
1032
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1033
- (nsfw) content, according to the `safety_checker`.
1034
- """
1035
- return self.__call__(
1036
- prompt=prompt,
1037
- negative_prompt=negative_prompt,
1038
- image=image,
1039
- num_inference_steps=num_inference_steps,
1040
- guidance_scale=guidance_scale,
1041
- strength=strength,
1042
- num_images_per_prompt=num_images_per_prompt,
1043
- eta=eta,
1044
- generator=generator,
1045
- max_embeddings_multiples=max_embeddings_multiples,
1046
- output_type=output_type,
1047
- return_dict=return_dict,
1048
- callback=callback,
1049
- callback_steps=callback_steps,
1050
- **kwargs,
1051
- )
1052
-
1053
- def inpaint(
1054
- self,
1055
- image: Union[np.ndarray, PIL.Image.Image],
1056
- mask_image: Union[np.ndarray, PIL.Image.Image],
1057
- prompt: Union[str, List[str]],
1058
- negative_prompt: Optional[Union[str, List[str]]] = None,
1059
- strength: float = 0.8,
1060
- num_inference_steps: Optional[int] = 50,
1061
- guidance_scale: Optional[float] = 7.5,
1062
- num_images_per_prompt: Optional[int] = 1,
1063
- eta: Optional[float] = 0.0,
1064
- generator: Optional[torch.Generator] = None,
1065
- max_embeddings_multiples: Optional[int] = 3,
1066
- output_type: Optional[str] = "pil",
1067
- return_dict: bool = True,
1068
- callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
1069
- callback_steps: int = 1,
1070
- **kwargs,
1071
- ):
1072
- r"""
1073
- Function for inpaint.
1074
- Args:
1075
- image (`np.ndarray` or `PIL.Image.Image`):
1076
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
1077
- process. This is the image whose masked region will be inpainted.
1078
- mask_image (`np.ndarray` or `PIL.Image.Image`):
1079
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1080
- replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
1081
- PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
1082
- contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
1083
- prompt (`str` or `List[str]`):
1084
- The prompt or prompts to guide the image generation.
1085
- negative_prompt (`str` or `List[str]`, *optional*):
1086
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1087
- if `guidance_scale` is less than `1`).
1088
- strength (`float`, *optional*, defaults to 0.8):
1089
- Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
1090
- is 1, the denoising process will be run on the masked area for the full number of iterations specified
1091
- in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
1092
- noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
1093
- num_inference_steps (`int`, *optional*, defaults to 50):
1094
- The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
1095
- the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
1096
- guidance_scale (`float`, *optional*, defaults to 7.5):
1097
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1098
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
1099
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1100
- 	            1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
1101
- usually at the expense of lower image quality.
1102
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1103
- The number of images to generate per prompt.
1104
- eta (`float`, *optional*, defaults to 0.0):
1105
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1106
- [`schedulers.DDIMScheduler`], will be ignored for others.
1107
- generator (`torch.Generator`, *optional*):
1108
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
1109
- deterministic.
1110
- max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1111
- The max multiple length of prompt embeddings compared to the max output length of text encoder.
1112
- output_type (`str`, *optional*, defaults to `"pil"`):
1113
- 	            The output format of the generated image. Choose between
1114
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1115
- return_dict (`bool`, *optional*, defaults to `True`):
1116
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1117
- plain tuple.
1118
- callback (`Callable`, *optional*):
1119
- A function that will be called every `callback_steps` steps during inference. The function will be
1120
- called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
1121
- callback_steps (`int`, *optional*, defaults to 1):
1122
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1123
- called at every step.
1124
- Returns:
1125
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1126
- 	        [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1127
- When returning a tuple, the first element is a list with the generated images, and the second element is a
1128
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1129
- (nsfw) content, according to the `safety_checker`.
1130
- """
1131
- return self.__call__(
1132
- prompt=prompt,
1133
- negative_prompt=negative_prompt,
1134
- image=image,
1135
- mask_image=mask_image,
1136
- num_inference_steps=num_inference_steps,
1137
- guidance_scale=guidance_scale,
1138
- strength=strength,
1139
- num_images_per_prompt=num_images_per_prompt,
1140
- eta=eta,
1141
- generator=generator,
1142
- max_embeddings_multiples=max_embeddings_multiples,
1143
- output_type=output_type,
1144
- return_dict=return_dict,
1145
- callback=callback,
1146
- callback_steps=callback_steps,
1147
- **kwargs,
1148
- )
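
For reference, a minimal usage sketch of the long-prompt-weighting ONNX pipeline removed above, assuming it is loaded as a `custom_pipeline`; the checkpoint id, ONNX revision, and execution provider are placeholder assumptions.

from diffusers import DiffusionPipeline

# Load the community pipeline by name; model id, revision and provider are assumptions.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    revision="onnx",
    provider="CPUExecutionProvider",
    custom_pipeline="lpw_stable_diffusion_onnx",
)

prompt = "a photo of an astronaut riding a horse on mars, (high quality:1.2)"
negative_prompt = "lowres, bad anatomy"

# text2img/img2img/inpaint are thin wrappers around __call__ with the matching arguments.
image = pipe.text2img(
    prompt,
    negative_prompt=negative_prompt,
    width=512,
    height=512,
    max_embeddings_multiples=3,
).images[0]
image.save("astronaut.png")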
 
lpw_stable_diffusion_xl.py DELETED
The diff for this file is too large to render. See raw diff
 
magic_mix.py DELETED
@@ -1,152 +0,0 @@
1
- from typing import Union
2
-
3
- import torch
4
- from PIL import Image
5
- from torchvision import transforms as tfms
6
- from tqdm.auto import tqdm
7
- from transformers import CLIPTextModel, CLIPTokenizer
8
-
9
- from diffusers import (
10
- AutoencoderKL,
11
- DDIMScheduler,
12
- DiffusionPipeline,
13
- LMSDiscreteScheduler,
14
- PNDMScheduler,
15
- UNet2DConditionModel,
16
- )
17
-
18
-
19
- class MagicMixPipeline(DiffusionPipeline):
20
- def __init__(
21
- self,
22
- vae: AutoencoderKL,
23
- text_encoder: CLIPTextModel,
24
- tokenizer: CLIPTokenizer,
25
- unet: UNet2DConditionModel,
26
- scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler],
27
- ):
28
- super().__init__()
29
-
30
- self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
31
-
32
- # convert PIL image to latents
33
- def encode(self, img):
34
- with torch.no_grad():
35
- latent = self.vae.encode(tfms.ToTensor()(img).unsqueeze(0).to(self.device) * 2 - 1)
36
- latent = 0.18215 * latent.latent_dist.sample()
37
- return latent
38
-
39
- # convert latents to PIL image
40
- def decode(self, latent):
41
- latent = (1 / 0.18215) * latent
42
- with torch.no_grad():
43
- img = self.vae.decode(latent).sample
44
- img = (img / 2 + 0.5).clamp(0, 1)
45
- img = img.detach().cpu().permute(0, 2, 3, 1).numpy()
46
- img = (img * 255).round().astype("uint8")
47
- return Image.fromarray(img[0])
48
-
49
- # convert prompt into text embeddings, also unconditional embeddings
50
- def prep_text(self, prompt):
51
- text_input = self.tokenizer(
52
- prompt,
53
- padding="max_length",
54
- max_length=self.tokenizer.model_max_length,
55
- truncation=True,
56
- return_tensors="pt",
57
- )
58
-
59
- text_embedding = self.text_encoder(text_input.input_ids.to(self.device))[0]
60
-
61
- uncond_input = self.tokenizer(
62
- "",
63
- padding="max_length",
64
- max_length=self.tokenizer.model_max_length,
65
- truncation=True,
66
- return_tensors="pt",
67
- )
68
-
69
- uncond_embedding = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
70
-
71
- return torch.cat([uncond_embedding, text_embedding])
72
-
73
- def __call__(
74
- self,
75
- img: Image.Image,
76
- prompt: str,
77
- kmin: float = 0.3,
78
- kmax: float = 0.6,
79
- mix_factor: float = 0.5,
80
- seed: int = 42,
81
- steps: int = 50,
82
- guidance_scale: float = 7.5,
83
- ) -> Image.Image:
84
- tmin = steps - int(kmin * steps)
85
- tmax = steps - int(kmax * steps)
86
-
87
- text_embeddings = self.prep_text(prompt)
88
-
89
- self.scheduler.set_timesteps(steps)
90
-
91
- width, height = img.size
92
- encoded = self.encode(img)
93
-
94
- torch.manual_seed(seed)
95
- noise = torch.randn(
96
- (1, self.unet.config.in_channels, height // 8, width // 8),
97
- ).to(self.device)
98
-
99
- latents = self.scheduler.add_noise(
100
- encoded,
101
- noise,
102
- timesteps=self.scheduler.timesteps[tmax],
103
- )
104
-
105
- input = torch.cat([latents] * 2)
106
-
107
- input = self.scheduler.scale_model_input(input, self.scheduler.timesteps[tmax])
108
-
109
- with torch.no_grad():
110
- pred = self.unet(
111
- input,
112
- self.scheduler.timesteps[tmax],
113
- encoder_hidden_states=text_embeddings,
114
- ).sample
115
-
116
- pred_uncond, pred_text = pred.chunk(2)
117
- pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
118
-
119
- latents = self.scheduler.step(pred, self.scheduler.timesteps[tmax], latents).prev_sample
120
-
121
- for i, t in enumerate(tqdm(self.scheduler.timesteps)):
122
- if i > tmax:
123
- if i < tmin: # layout generation phase
124
- orig_latents = self.scheduler.add_noise(
125
- encoded,
126
- noise,
127
- timesteps=t,
128
- )
129
-
130
- input = (
131
- (mix_factor * latents) + (1 - mix_factor) * orig_latents
132
- 	                    )  # interpolating between layout noise and conditionally generated noise to preserve layout semantics
133
- input = torch.cat([input] * 2)
134
-
135
- else: # content generation phase
136
- input = torch.cat([latents] * 2)
137
-
138
- input = self.scheduler.scale_model_input(input, t)
139
-
140
- with torch.no_grad():
141
- pred = self.unet(
142
- input,
143
- t,
144
- encoder_hidden_states=text_embeddings,
145
- ).sample
146
-
147
- pred_uncond, pred_text = pred.chunk(2)
148
- pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
149
-
150
- latents = self.scheduler.step(pred, t, latents).prev_sample
151
-
152
- return self.decode(latents)
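
For reference, a minimal usage sketch of the MagicMix pipeline removed above, assuming it is loaded as a `custom_pipeline`; the base checkpoint and input file names are placeholder assumptions.

from PIL import Image

from diffusers import DiffusionPipeline

# Base checkpoint is an assumption; any Stable Diffusion 1.x checkpoint with these components should work.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="magic_mix",
).to("cuda")

layout_image = Image.open("phone.jpg")  # placeholder layout image
# kmin/kmax bound the layout-generation phase, mix_factor controls the latent interpolation.
mixed = pipe(layout_image, prompt="bed", kmin=0.3, kmax=0.5, mix_factor=0.5, steps=50)
mixed.save("magic_mix.jpg")  # __call__ returns a PIL.Image.Image directly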
 
marigold_depth_estimation.py DELETED
@@ -1,673 +0,0 @@
1
- # Copyright 2024 Bingxin Ke, ETH Zurich and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- # --------------------------------------------------------------------------
15
- # If you find this code useful, we kindly ask you to cite our paper in your work.
16
- # Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
17
- # More information about the method can be found at https://marigoldmonodepth.github.io
18
- # --------------------------------------------------------------------------
19
-
20
-
21
- import logging
22
- import math
23
- from typing import Dict, Union
24
-
25
- import matplotlib
26
- import numpy as np
27
- import torch
28
- from PIL import Image
29
- from PIL.Image import Resampling
30
- from scipy.optimize import minimize
31
- from torch.utils.data import DataLoader, TensorDataset
32
- from tqdm.auto import tqdm
33
- from transformers import CLIPTextModel, CLIPTokenizer
34
-
35
- from diffusers import (
36
- AutoencoderKL,
37
- DDIMScheduler,
38
- DiffusionPipeline,
39
- LCMScheduler,
40
- UNet2DConditionModel,
41
- )
42
- from diffusers.utils import BaseOutput, check_min_version
43
-
44
-
45
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
46
- check_min_version("0.29.0")
47
-
48
- class MarigoldDepthOutput(BaseOutput):
49
- """
50
- Output class for Marigold monocular depth prediction pipeline.
51
-
52
- Args:
53
- depth_np (`np.ndarray`):
54
- Predicted depth map, with depth values in the range of [0, 1].
55
- depth_colored (`None` or `PIL.Image.Image`):
56
- Colorized depth map, with the shape of [3, H, W] and values in [0, 1].
57
- uncertainty (`None` or `np.ndarray`):
58
- 	        Uncalibrated uncertainty (MAD, median absolute deviation) coming from ensembling.
59
- """
60
-
61
- depth_np: np.ndarray
62
- depth_colored: Union[None, Image.Image]
63
- uncertainty: Union[None, np.ndarray]
64
-
65
-
66
- def get_pil_resample_method(method_str: str) -> Resampling:
67
- resample_method_dic = {
68
- "bilinear": Resampling.BILINEAR,
69
- "bicubic": Resampling.BICUBIC,
70
- "nearest": Resampling.NEAREST,
71
- }
72
- resample_method = resample_method_dic.get(method_str, None)
73
- if resample_method is None:
74
- 	        raise ValueError(f"Unknown resampling method: {method_str}")
75
- else:
76
- return resample_method
77
-
78
-
79
- class MarigoldPipeline(DiffusionPipeline):
80
- """
81
- Pipeline for monocular depth estimation using Marigold: https://marigoldmonodepth.github.io.
82
-
83
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
84
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
85
-
86
- Args:
87
- unet (`UNet2DConditionModel`):
88
- Conditional U-Net to denoise the depth latent, conditioned on image latent.
89
- vae (`AutoencoderKL`):
90
- Variational Auto-Encoder (VAE) Model to encode and decode images and depth maps
91
- to and from latent representations.
92
- scheduler (`DDIMScheduler`):
93
- A scheduler to be used in combination with `unet` to denoise the encoded image latents.
94
- text_encoder (`CLIPTextModel`):
95
- Text-encoder, for empty text embedding.
96
- tokenizer (`CLIPTokenizer`):
97
- CLIP tokenizer.
98
- """
99
-
100
- rgb_latent_scale_factor = 0.18215
101
- depth_latent_scale_factor = 0.18215
102
-
103
- def __init__(
104
- self,
105
- unet: UNet2DConditionModel,
106
- vae: AutoencoderKL,
107
- scheduler: DDIMScheduler,
108
- text_encoder: CLIPTextModel,
109
- tokenizer: CLIPTokenizer,
110
- ):
111
- super().__init__()
112
-
113
- self.register_modules(
114
- unet=unet,
115
- vae=vae,
116
- scheduler=scheduler,
117
- text_encoder=text_encoder,
118
- tokenizer=tokenizer,
119
- )
120
-
121
- self.empty_text_embed = None
122
-
123
- @torch.no_grad()
124
- def __call__(
125
- self,
126
- input_image: Image,
127
- denoising_steps: int = 10,
128
- ensemble_size: int = 10,
129
- processing_res: int = 768,
130
- match_input_res: bool = True,
131
- resample_method: str = "bilinear",
132
- batch_size: int = 0,
133
- seed: Union[int, None] = None,
134
- color_map: str = "Spectral",
135
- show_progress_bar: bool = True,
136
- ensemble_kwargs: Dict = None,
137
- ) -> MarigoldDepthOutput:
138
- """
139
- Function invoked when calling the pipeline.
140
-
141
- Args:
142
- input_image (`Image`):
143
- Input RGB (or gray-scale) image.
144
- processing_res (`int`, *optional*, defaults to `768`):
145
- Maximum resolution of processing.
146
- If set to 0: will not resize at all.
147
- match_input_res (`bool`, *optional*, defaults to `True`):
148
- Resize depth prediction to match input resolution.
149
- Only valid if `processing_res` > 0.
150
- resample_method: (`str`, *optional*, defaults to `bilinear`):
151
- Resampling method used to resize images and depth predictions. This can be one of `bilinear`, `bicubic` or `nearest`, defaults to: `bilinear`.
152
- denoising_steps (`int`, *optional*, defaults to `10`):
153
- Number of diffusion denoising steps (DDIM) during inference.
154
- ensemble_size (`int`, *optional*, defaults to `10`):
155
- Number of predictions to be ensembled.
156
- batch_size (`int`, *optional*, defaults to `0`):
157
- 	            Inference batch size, no bigger than `ensemble_size`.
158
- If set to 0, the script will automatically decide the proper batch size.
159
- seed (`int`, *optional*, defaults to `None`)
160
- Reproducibility seed.
161
- show_progress_bar (`bool`, *optional*, defaults to `True`):
162
- Display a progress bar of diffusion denoising.
163
- color_map (`str`, *optional*, defaults to `"Spectral"`, pass `None` to skip colorized depth map generation):
164
- Colormap used to colorize the depth map.
165
- ensemble_kwargs (`dict`, *optional*, defaults to `None`):
166
- Arguments for detailed ensembling settings.
167
- Returns:
168
- `MarigoldDepthOutput`: Output class for Marigold monocular depth prediction pipeline, including:
169
- - **depth_np** (`np.ndarray`) Predicted depth map, with depth values in the range of [0, 1]
170
- - **depth_colored** (`PIL.Image.Image`) Colorized depth map, with the shape of [3, H, W] and values in [0, 1], None if `color_map` is `None`
171
- 	        - **uncertainty** (`None` or `np.ndarray`) Uncalibrated uncertainty (MAD, median absolute deviation)
172
- coming from ensembling. None if `ensemble_size = 1`
173
- """
174
-
175
- device = self.device
176
- input_size = input_image.size
177
-
178
- if not match_input_res:
179
- 	            assert processing_res is not None, "Value error: `processing_res` must be set when `match_input_res` is disabled."
180
- assert processing_res >= 0
181
- assert ensemble_size >= 1
182
-
183
- # Check if denoising step is reasonable
184
- self._check_inference_step(denoising_steps)
185
-
186
- resample_method: Resampling = get_pil_resample_method(resample_method)
187
-
188
- # ----------------- Image Preprocess -----------------
189
- # Resize image
190
- if processing_res > 0:
191
- input_image = self.resize_max_res(
192
- input_image,
193
- max_edge_resolution=processing_res,
194
- resample_method=resample_method,
195
- )
196
- # Convert the image to RGB, to 1.remove the alpha channel 2.convert B&W to 3-channel
197
- 	        # Convert the image to RGB: (1) remove the alpha channel, (2) convert grayscale to 3-channel
198
- image = np.asarray(input_image)
199
-
200
- # Normalize rgb values
201
- rgb = np.transpose(image, (2, 0, 1)) # [H, W, rgb] -> [rgb, H, W]
202
- rgb_norm = rgb / 255.0 * 2.0 - 1.0 # [0, 255] -> [-1, 1]
203
- rgb_norm = torch.from_numpy(rgb_norm).to(self.dtype)
204
- rgb_norm = rgb_norm.to(device)
205
- assert rgb_norm.min() >= -1.0 and rgb_norm.max() <= 1.0
206
-
207
- # ----------------- Predicting depth -----------------
208
- # Batch repeated input image
209
- duplicated_rgb = torch.stack([rgb_norm] * ensemble_size)
210
- single_rgb_dataset = TensorDataset(duplicated_rgb)
211
- if batch_size > 0:
212
- _bs = batch_size
213
- else:
214
- _bs = self._find_batch_size(
215
- ensemble_size=ensemble_size,
216
- input_res=max(rgb_norm.shape[1:]),
217
- dtype=self.dtype,
218
- )
219
-
220
- single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=_bs, shuffle=False)
221
-
222
- # Predict depth maps (batched)
223
- depth_pred_ls = []
224
- if show_progress_bar:
225
- iterable = tqdm(single_rgb_loader, desc=" " * 2 + "Inference batches", leave=False)
226
- else:
227
- iterable = single_rgb_loader
228
- for batch in iterable:
229
- (batched_img,) = batch
230
- depth_pred_raw = self.single_infer(
231
- rgb_in=batched_img,
232
- num_inference_steps=denoising_steps,
233
- show_pbar=show_progress_bar,
234
- seed=seed,
235
- )
236
- depth_pred_ls.append(depth_pred_raw.detach())
237
- depth_preds = torch.concat(depth_pred_ls, dim=0).squeeze()
238
- torch.cuda.empty_cache() # clear vram cache for ensembling
239
-
240
- # ----------------- Test-time ensembling -----------------
241
- if ensemble_size > 1:
242
- depth_pred, pred_uncert = self.ensemble_depths(depth_preds, **(ensemble_kwargs or {}))
243
- else:
244
- depth_pred = depth_preds
245
- pred_uncert = None
246
-
247
- # ----------------- Post processing -----------------
248
- # Scale prediction to [0, 1]
249
- min_d = torch.min(depth_pred)
250
- max_d = torch.max(depth_pred)
251
- depth_pred = (depth_pred - min_d) / (max_d - min_d)
252
-
253
- # Convert to numpy
254
- depth_pred = depth_pred.cpu().numpy().astype(np.float32)
255
-
256
- # Resize back to original resolution
257
- if match_input_res:
258
- pred_img = Image.fromarray(depth_pred)
259
- pred_img = pred_img.resize(input_size, resample=resample_method)
260
- depth_pred = np.asarray(pred_img)
261
-
262
- # Clip output range
263
- depth_pred = depth_pred.clip(0, 1)
264
-
265
- # Colorize
266
- if color_map is not None:
267
- depth_colored = self.colorize_depth_maps(
268
- depth_pred, 0, 1, cmap=color_map
269
- ).squeeze() # [3, H, W], value in (0, 1)
270
- depth_colored = (depth_colored * 255).astype(np.uint8)
271
- depth_colored_hwc = self.chw2hwc(depth_colored)
272
- depth_colored_img = Image.fromarray(depth_colored_hwc)
273
- else:
274
- depth_colored_img = None
275
-
276
- return MarigoldDepthOutput(
277
- depth_np=depth_pred,
278
- depth_colored=depth_colored_img,
279
- uncertainty=pred_uncert,
280
- )
281
-
282
- def _check_inference_step(self, n_step: int):
283
- """
284
- Check if denoising step is reasonable
285
- Args:
286
- n_step (`int`): denoising steps
287
- """
288
- assert n_step >= 1
289
-
290
- if isinstance(self.scheduler, DDIMScheduler):
291
- if n_step < 10:
292
- logging.warning(
293
- f"Too few denoising steps: {n_step}. Recommended to use the LCM checkpoint for few-step inference."
294
- )
295
- elif isinstance(self.scheduler, LCMScheduler):
296
- if not 1 <= n_step <= 4:
297
- logging.warning(f"Non-optimal setting of denoising steps: {n_step}. Recommended setting is 1-4 steps.")
298
- else:
299
- raise RuntimeError(f"Unsupported scheduler type: {type(self.scheduler)}")
300
-
301
- def _encode_empty_text(self):
302
- """
303
- Encode text embedding for empty prompt.
304
- """
305
- prompt = ""
306
- text_inputs = self.tokenizer(
307
- prompt,
308
- padding="do_not_pad",
309
- max_length=self.tokenizer.model_max_length,
310
- truncation=True,
311
- return_tensors="pt",
312
- )
313
- text_input_ids = text_inputs.input_ids.to(self.text_encoder.device)
314
- self.empty_text_embed = self.text_encoder(text_input_ids)[0].to(self.dtype)
315
-
316
- @torch.no_grad()
317
- def single_infer(
318
- self,
319
- rgb_in: torch.Tensor,
320
- num_inference_steps: int,
321
- seed: Union[int, None],
322
- show_pbar: bool,
323
- ) -> torch.Tensor:
324
- """
325
- Perform an individual depth prediction without ensembling.
326
-
327
- Args:
328
- rgb_in (`torch.Tensor`):
329
- Input RGB image.
330
- num_inference_steps (`int`):
331
- 	            Number of diffusion denoising steps (DDIM) during inference.
332
- show_pbar (`bool`):
333
- Display a progress bar of diffusion denoising.
334
- Returns:
335
- `torch.Tensor`: Predicted depth map.
336
- """
337
- device = rgb_in.device
338
-
339
- # Set timesteps
340
- self.scheduler.set_timesteps(num_inference_steps, device=device)
341
- timesteps = self.scheduler.timesteps # [T]
342
-
343
- # Encode image
344
- rgb_latent = self.encode_rgb(rgb_in)
345
-
346
- # Initial depth map (noise)
347
- if seed is None:
348
- rand_num_generator = None
349
- else:
350
- rand_num_generator = torch.Generator(device=device)
351
- rand_num_generator.manual_seed(seed)
352
- depth_latent = torch.randn(
353
- rgb_latent.shape,
354
- device=device,
355
- dtype=self.dtype,
356
- generator=rand_num_generator,
357
- ) # [B, 4, h, w]
358
-
359
- # Batched empty text embedding
360
- if self.empty_text_embed is None:
361
- self._encode_empty_text()
362
- batch_empty_text_embed = self.empty_text_embed.repeat((rgb_latent.shape[0], 1, 1)) # [B, 2, 1024]
363
-
364
- # Denoising loop
365
- if show_pbar:
366
- iterable = tqdm(
367
- enumerate(timesteps),
368
- total=len(timesteps),
369
- leave=False,
370
- desc=" " * 4 + "Diffusion denoising",
371
- )
372
- else:
373
- iterable = enumerate(timesteps)
374
-
375
- for i, t in iterable:
376
- unet_input = torch.cat([rgb_latent, depth_latent], dim=1) # this order is important
377
-
378
- # predict the noise residual
379
- noise_pred = self.unet(unet_input, t, encoder_hidden_states=batch_empty_text_embed).sample # [B, 4, h, w]
380
-
381
- # compute the previous noisy sample x_t -> x_t-1
382
- depth_latent = self.scheduler.step(noise_pred, t, depth_latent, generator=rand_num_generator).prev_sample
383
-
384
- depth = self.decode_depth(depth_latent)
385
-
386
- # clip prediction
387
- depth = torch.clip(depth, -1.0, 1.0)
388
- # shift to [0, 1]
389
- depth = (depth + 1.0) / 2.0
390
-
391
- return depth
392
-
393
- def encode_rgb(self, rgb_in: torch.Tensor) -> torch.Tensor:
394
- """
395
- Encode RGB image into latent.
396
-
397
- Args:
398
- rgb_in (`torch.Tensor`):
399
- Input RGB image to be encoded.
400
-
401
- Returns:
402
- `torch.Tensor`: Image latent.
403
- """
404
- # encode
405
- h = self.vae.encoder(rgb_in)
406
- moments = self.vae.quant_conv(h)
407
- mean, logvar = torch.chunk(moments, 2, dim=1)
408
- # scale latent
409
- rgb_latent = mean * self.rgb_latent_scale_factor
410
- return rgb_latent
411
-
412
- def decode_depth(self, depth_latent: torch.Tensor) -> torch.Tensor:
413
- """
414
- Decode depth latent into depth map.
415
-
416
- Args:
417
- depth_latent (`torch.Tensor`):
418
- Depth latent to be decoded.
419
-
420
- Returns:
421
- `torch.Tensor`: Decoded depth map.
422
- """
423
- # scale latent
424
- depth_latent = depth_latent / self.depth_latent_scale_factor
425
- # decode
426
- z = self.vae.post_quant_conv(depth_latent)
427
- stacked = self.vae.decoder(z)
428
- # mean of output channels
429
- depth_mean = stacked.mean(dim=1, keepdim=True)
430
- return depth_mean
431
-
432
- @staticmethod
433
- def resize_max_res(img: Image.Image, max_edge_resolution: int, resample_method=Resampling.BILINEAR) -> Image.Image:
434
- """
435
- Resize image to limit maximum edge length while keeping aspect ratio.
436
-
437
- Args:
438
- img (`Image.Image`):
439
- Image to be resized.
440
- max_edge_resolution (`int`):
441
- Maximum edge length (pixel).
442
- resample_method (`PIL.Image.Resampling`):
443
- Resampling method used to resize images.
444
-
445
- Returns:
446
- `Image.Image`: Resized image.
447
- """
448
- original_width, original_height = img.size
449
- downscale_factor = min(max_edge_resolution / original_width, max_edge_resolution / original_height)
450
-
451
- new_width = int(original_width * downscale_factor)
452
- new_height = int(original_height * downscale_factor)
453
-
454
- resized_img = img.resize((new_width, new_height), resample=resample_method)
455
- return resized_img
456
-
457
- @staticmethod
458
- def colorize_depth_maps(depth_map, min_depth, max_depth, cmap="Spectral", valid_mask=None):
459
- """
460
- Colorize depth maps.
461
- """
462
- assert len(depth_map.shape) >= 2, "Invalid dimension"
463
-
464
- if isinstance(depth_map, torch.Tensor):
465
- depth = depth_map.detach().clone().squeeze().numpy()
466
- elif isinstance(depth_map, np.ndarray):
467
- depth = depth_map.copy().squeeze()
468
- # reshape to [ (B,) H, W ]
469
- if depth.ndim < 3:
470
- depth = depth[np.newaxis, :, :]
471
-
472
- # colorize
473
- cm = matplotlib.colormaps[cmap]
474
- depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)
475
- img_colored_np = cm(depth, bytes=False)[:, :, :, 0:3] # value from 0 to 1
476
- img_colored_np = np.rollaxis(img_colored_np, 3, 1)
477
-
478
- if valid_mask is not None:
479
- if isinstance(depth_map, torch.Tensor):
480
- valid_mask = valid_mask.detach().numpy()
481
- valid_mask = valid_mask.squeeze() # [H, W] or [B, H, W]
482
- if valid_mask.ndim < 3:
483
- valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]
484
- else:
485
- valid_mask = valid_mask[:, np.newaxis, :, :]
486
- valid_mask = np.repeat(valid_mask, 3, axis=1)
487
- img_colored_np[~valid_mask] = 0
488
-
489
- if isinstance(depth_map, torch.Tensor):
490
- img_colored = torch.from_numpy(img_colored_np).float()
491
- elif isinstance(depth_map, np.ndarray):
492
- img_colored = img_colored_np
493
-
494
- return img_colored
495
-
496
- @staticmethod
497
- def chw2hwc(chw):
498
- assert 3 == len(chw.shape)
499
- if isinstance(chw, torch.Tensor):
500
- hwc = torch.permute(chw, (1, 2, 0))
501
- elif isinstance(chw, np.ndarray):
502
- hwc = np.moveaxis(chw, 0, -1)
503
- return hwc
504
-
505
- @staticmethod
506
- def _find_batch_size(ensemble_size: int, input_res: int, dtype: torch.dtype) -> int:
507
- """
508
- Automatically search for suitable operating batch size.
509
-
510
- Args:
511
- ensemble_size (`int`):
512
- Number of predictions to be ensembled.
513
- input_res (`int`):
514
- Operating resolution of the input image.
515
-
516
- Returns:
517
- `int`: Operating batch size.
518
- """
519
- # Search table for suggested max. inference batch size
520
- bs_search_table = [
521
- # tested on A100-PCIE-80GB
522
- {"res": 768, "total_vram": 79, "bs": 35, "dtype": torch.float32},
523
- {"res": 1024, "total_vram": 79, "bs": 20, "dtype": torch.float32},
524
- # tested on A100-PCIE-40GB
525
- {"res": 768, "total_vram": 39, "bs": 15, "dtype": torch.float32},
526
- {"res": 1024, "total_vram": 39, "bs": 8, "dtype": torch.float32},
527
- {"res": 768, "total_vram": 39, "bs": 30, "dtype": torch.float16},
528
- {"res": 1024, "total_vram": 39, "bs": 15, "dtype": torch.float16},
529
- # tested on RTX3090, RTX4090
530
- {"res": 512, "total_vram": 23, "bs": 20, "dtype": torch.float32},
531
- {"res": 768, "total_vram": 23, "bs": 7, "dtype": torch.float32},
532
- {"res": 1024, "total_vram": 23, "bs": 3, "dtype": torch.float32},
533
- {"res": 512, "total_vram": 23, "bs": 40, "dtype": torch.float16},
534
- {"res": 768, "total_vram": 23, "bs": 18, "dtype": torch.float16},
535
- {"res": 1024, "total_vram": 23, "bs": 10, "dtype": torch.float16},
536
- # tested on GTX1080Ti
537
- {"res": 512, "total_vram": 10, "bs": 5, "dtype": torch.float32},
538
- {"res": 768, "total_vram": 10, "bs": 2, "dtype": torch.float32},
539
- {"res": 512, "total_vram": 10, "bs": 10, "dtype": torch.float16},
540
- {"res": 768, "total_vram": 10, "bs": 5, "dtype": torch.float16},
541
- {"res": 1024, "total_vram": 10, "bs": 3, "dtype": torch.float16},
542
- ]
543
-
544
- if not torch.cuda.is_available():
545
- return 1
546
-
547
- total_vram = torch.cuda.mem_get_info()[1] / 1024.0**3
548
- filtered_bs_search_table = [s for s in bs_search_table if s["dtype"] == dtype]
549
- for settings in sorted(
550
- filtered_bs_search_table,
551
- key=lambda k: (k["res"], -k["total_vram"]),
552
- ):
553
- if input_res <= settings["res"] and total_vram >= settings["total_vram"]:
554
- bs = settings["bs"]
555
- if bs > ensemble_size:
556
- bs = ensemble_size
557
- elif bs > math.ceil(ensemble_size / 2) and bs < ensemble_size:
558
- bs = math.ceil(ensemble_size / 2)
559
- return bs
560
-
561
- return 1
562
-
563
- @staticmethod
564
- def ensemble_depths(
565
- input_images: torch.Tensor,
566
- regularizer_strength: float = 0.02,
567
- max_iter: int = 2,
568
- tol: float = 1e-3,
569
- reduction: str = "median",
570
- max_res: int = None,
571
- ):
572
- """
573
- To ensemble multiple affine-invariant depth images (up to scale and shift),
574
- 	        by jointly estimating the scale and shift that align them.
575
- """
576
-
577
- def inter_distances(tensors: torch.Tensor):
578
- """
579
- To calculate the distance between each two depth maps.
580
- """
581
- distances = []
582
- for i, j in torch.combinations(torch.arange(tensors.shape[0])):
583
- arr1 = tensors[i : i + 1]
584
- arr2 = tensors[j : j + 1]
585
- distances.append(arr1 - arr2)
586
- dist = torch.concatenate(distances, dim=0)
587
- return dist
588
-
589
- device = input_images.device
590
- dtype = input_images.dtype
591
- np_dtype = np.float32
592
-
593
- original_input = input_images.clone()
594
- n_img = input_images.shape[0]
595
- ori_shape = input_images.shape
596
-
597
- if max_res is not None:
598
- scale_factor = torch.min(max_res / torch.tensor(ori_shape[-2:]))
599
- if scale_factor < 1:
600
- downscaler = torch.nn.Upsample(scale_factor=scale_factor, mode="nearest")
601
- 	                input_images = downscaler(input_images)
602
-
603
- # init guess
604
- _min = np.min(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
605
- _max = np.max(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
606
- s_init = 1.0 / (_max - _min).reshape((-1, 1, 1))
607
- t_init = (-1 * s_init.flatten() * _min.flatten()).reshape((-1, 1, 1))
608
- x = np.concatenate([s_init, t_init]).reshape(-1).astype(np_dtype)
609
-
610
- input_images = input_images.to(device)
611
-
612
- # objective function
613
- def closure(x):
614
- l = len(x)
615
- s = x[: int(l / 2)]
616
- t = x[int(l / 2) :]
617
- s = torch.from_numpy(s).to(dtype=dtype).to(device)
618
- t = torch.from_numpy(t).to(dtype=dtype).to(device)
619
-
620
- transformed_arrays = input_images * s.view((-1, 1, 1)) + t.view((-1, 1, 1))
621
- dists = inter_distances(transformed_arrays)
622
- sqrt_dist = torch.sqrt(torch.mean(dists**2))
623
-
624
- if "mean" == reduction:
625
- pred = torch.mean(transformed_arrays, dim=0)
626
- elif "median" == reduction:
627
- pred = torch.median(transformed_arrays, dim=0).values
628
- else:
629
- raise ValueError
630
-
631
- near_err = torch.sqrt((0 - torch.min(pred)) ** 2)
632
- far_err = torch.sqrt((1 - torch.max(pred)) ** 2)
633
-
634
- err = sqrt_dist + (near_err + far_err) * regularizer_strength
635
- err = err.detach().cpu().numpy().astype(np_dtype)
636
- return err
637
-
638
- res = minimize(
639
- closure,
640
- x,
641
- method="BFGS",
642
- tol=tol,
643
- options={"maxiter": max_iter, "disp": False},
644
- )
645
- x = res.x
646
- l = len(x)
647
- s = x[: int(l / 2)]
648
- t = x[int(l / 2) :]
649
-
650
- # Prediction
651
- s = torch.from_numpy(s).to(dtype=dtype).to(device)
652
- t = torch.from_numpy(t).to(dtype=dtype).to(device)
653
- transformed_arrays = original_input * s.view(-1, 1, 1) + t.view(-1, 1, 1)
654
- if "mean" == reduction:
655
- aligned_images = torch.mean(transformed_arrays, dim=0)
656
- std = torch.std(transformed_arrays, dim=0)
657
- uncertainty = std
658
- elif "median" == reduction:
659
- aligned_images = torch.median(transformed_arrays, dim=0).values
660
- # MAD (median absolute deviation) as uncertainty indicator
661
- abs_dev = torch.abs(transformed_arrays - aligned_images)
662
- mad = torch.median(abs_dev, dim=0).values
663
- uncertainty = mad
664
- else:
665
- raise ValueError(f"Unknown reduction method: {reduction}")
666
-
667
- # Scale and shift to [0, 1]
668
- _min = torch.min(aligned_images)
669
- _max = torch.max(aligned_images)
670
- aligned_images = (aligned_images - _min) / (_max - _min)
671
- uncertainty /= _max - _min
672
-
673
- return aligned_images, uncertainty
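
For reference, a minimal usage sketch of the Marigold depth-estimation pipeline removed above, assuming it is loaded as a `custom_pipeline`; the checkpoint id and file names are placeholder assumptions.

import numpy as np
import torch
from PIL import Image

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "prs-eth/marigold-v1-0",  # assumed Marigold checkpoint
    custom_pipeline="marigold_depth_estimation",
    torch_dtype=torch.float16,
).to("cuda")

image = Image.open("room.jpg")  # placeholder input photo
out = pipe(image, denoising_steps=10, ensemble_size=10, processing_res=768)

np.save("depth.npy", out.depth_np)  # affine-invariant depth in [0, 1]
if out.depth_colored is not None:
    out.depth_colored.save("depth_colored.png")  # colorized visualization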
 
masked_stable_diffusion_img2img.py DELETED
@@ -1,262 +0,0 @@
1
- from typing import Any, Callable, Dict, List, Optional, Union
2
-
3
- import numpy as np
4
- import PIL.Image
5
- import torch
6
-
7
- from diffusers import StableDiffusionImg2ImgPipeline
8
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
9
-
10
-
11
- class MaskedStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
12
- debug_save = False
13
-
14
- @torch.no_grad()
15
- def __call__(
16
- self,
17
- prompt: Union[str, List[str]] = None,
18
- image: Union[
19
- torch.Tensor,
20
- PIL.Image.Image,
21
- np.ndarray,
22
- List[torch.Tensor],
23
- List[PIL.Image.Image],
24
- List[np.ndarray],
25
- ] = None,
26
- strength: float = 0.8,
27
- num_inference_steps: Optional[int] = 50,
28
- guidance_scale: Optional[float] = 7.5,
29
- negative_prompt: Optional[Union[str, List[str]]] = None,
30
- num_images_per_prompt: Optional[int] = 1,
31
- eta: Optional[float] = 0.0,
32
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
33
- prompt_embeds: Optional[torch.Tensor] = None,
34
- negative_prompt_embeds: Optional[torch.Tensor] = None,
35
- output_type: Optional[str] = "pil",
36
- return_dict: bool = True,
37
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
38
- callback_steps: int = 1,
39
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
40
- mask: Union[
41
- torch.Tensor,
42
- PIL.Image.Image,
43
- np.ndarray,
44
- List[torch.Tensor],
45
- List[PIL.Image.Image],
46
- List[np.ndarray],
47
- ] = None,
48
- ):
49
- r"""
50
- The call function to the pipeline for generation.
51
-
52
- Args:
53
- prompt (`str` or `List[str]`, *optional*):
54
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
55
- image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
56
- `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
57
- latents as `image`, but if passing latents directly it is not encoded again.
58
- strength (`float`, *optional*, defaults to 0.8):
59
- Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
60
- starting point and more noise is added the higher the `strength`. The number of denoising steps depends
61
- on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
62
- process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
63
- essentially ignores `image`.
64
- num_inference_steps (`int`, *optional*, defaults to 50):
65
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
66
- expense of slower inference. This parameter is modulated by `strength`.
67
- guidance_scale (`float`, *optional*, defaults to 7.5):
68
- A higher guidance scale value encourages the model to generate images closely linked to the text
69
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
70
- negative_prompt (`str` or `List[str]`, *optional*):
71
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
72
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
73
- num_images_per_prompt (`int`, *optional*, defaults to 1):
74
- The number of images to generate per prompt.
75
- eta (`float`, *optional*, defaults to 0.0):
76
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
77
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
78
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
79
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
80
- generation deterministic.
81
- prompt_embeds (`torch.Tensor`, *optional*):
82
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
83
- provided, text embeddings are generated from the `prompt` input argument.
84
- negative_prompt_embeds (`torch.Tensor`, *optional*):
85
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
86
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
87
- output_type (`str`, *optional*, defaults to `"pil"`):
88
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
89
- return_dict (`bool`, *optional*, defaults to `True`):
90
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
91
- plain tuple.
92
- callback (`Callable`, *optional*):
93
- 	            A function that is called every `callback_steps` steps during inference. The function is called with the
94
- following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
95
- callback_steps (`int`, *optional*, defaults to 1):
96
- The frequency at which the `callback` function is called. If not specified, the callback is called at
97
- every step.
98
- cross_attention_kwargs (`dict`, *optional*):
99
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
100
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
101
- mask (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`, *optional*):
102
- A mask with non-zero elements for the area to be inpainted. If not specified, no mask is applied.
103
- Examples:
104
-
105
- Returns:
106
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
107
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
108
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
109
- second element is a list of `bool`s indicating whether the corresponding generated image contains
110
- "not-safe-for-work" (nsfw) content.
111
- """
112
- # code adapted from parent class StableDiffusionImg2ImgPipeline
113
-
114
- # 0. Check inputs. Raise error if not correct
115
- self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
116
-
117
- # 1. Define call parameters
118
- if prompt is not None and isinstance(prompt, str):
119
- batch_size = 1
120
- elif prompt is not None and isinstance(prompt, list):
121
- batch_size = len(prompt)
122
- else:
123
- batch_size = prompt_embeds.shape[0]
124
- device = self._execution_device
125
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
126
- 	        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
127
- # corresponds to doing no classifier free guidance.
128
- do_classifier_free_guidance = guidance_scale > 1.0
129
-
130
- # 2. Encode input prompt
131
- text_encoder_lora_scale = (
132
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
133
- )
134
- prompt_embeds = self._encode_prompt(
135
- prompt,
136
- device,
137
- num_images_per_prompt,
138
- do_classifier_free_guidance,
139
- negative_prompt,
140
- prompt_embeds=prompt_embeds,
141
- negative_prompt_embeds=negative_prompt_embeds,
142
- lora_scale=text_encoder_lora_scale,
143
- )
144
-
145
- # 3. Preprocess image
146
- image = self.image_processor.preprocess(image)
147
-
148
- # 4. set timesteps
149
- self.scheduler.set_timesteps(num_inference_steps, device=device)
150
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
151
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
152
-
153
- # 5. Prepare latent variables
154
- # it is sampled from the latent distribution of the VAE
155
- latents = self.prepare_latents(
156
- image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
157
- )
158
-
159
- # mean of the latent distribution
160
- init_latents = [
161
- self.vae.encode(image.to(device=device, dtype=prompt_embeds.dtype)[i : i + 1]).latent_dist.mean
162
- for i in range(batch_size)
163
- ]
164
- init_latents = torch.cat(init_latents, dim=0)
165
-
166
- # 6. create latent mask
167
- latent_mask = self._make_latent_mask(latents, mask)
168
-
169
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
170
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
171
-
172
- # 8. Denoising loop
173
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
174
- with self.progress_bar(total=num_inference_steps) as progress_bar:
175
- for i, t in enumerate(timesteps):
176
- # expand the latents if we are doing classifier free guidance
177
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
178
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
179
-
180
- # predict the noise residual
181
- noise_pred = self.unet(
182
- latent_model_input,
183
- t,
184
- encoder_hidden_states=prompt_embeds,
185
- cross_attention_kwargs=cross_attention_kwargs,
186
- return_dict=False,
187
- )[0]
188
-
189
- # perform guidance
190
- if do_classifier_free_guidance:
191
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
192
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
193
-
194
- if latent_mask is not None:
195
- latents = torch.lerp(init_latents * self.vae.config.scaling_factor, latents, latent_mask)
196
- noise_pred = torch.lerp(torch.zeros_like(noise_pred), noise_pred, latent_mask)
197
-
198
- # compute the previous noisy sample x_t -> x_t-1
199
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
200
-
201
- # call the callback, if provided
202
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
203
- progress_bar.update()
204
- if callback is not None and i % callback_steps == 0:
205
- step_idx = i // getattr(self.scheduler, "order", 1)
206
- callback(step_idx, t, latents)
207
-
208
- if not output_type == "latent":
209
- scaled = latents / self.vae.config.scaling_factor
210
- if latent_mask is not None:
211
- # scaled = latents / self.vae.config.scaling_factor * latent_mask + init_latents * (1 - latent_mask)
212
- scaled = torch.lerp(init_latents, scaled, latent_mask)
213
- image = self.vae.decode(scaled, return_dict=False)[0]
214
- if self.debug_save:
215
- image_gen = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
216
- image_gen = self.image_processor.postprocess(image_gen, output_type=output_type, do_denormalize=[True])
217
- image_gen[0].save("from_latent.png")
218
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
219
- else:
220
- image = latents
221
- has_nsfw_concept = None
222
-
223
- if has_nsfw_concept is None:
224
- do_denormalize = [True] * image.shape[0]
225
- else:
226
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
227
-
228
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
229
-
230
- # Offload last model to CPU
231
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
232
- self.final_offload_hook.offload()
233
-
234
- if not return_dict:
235
- return (image, has_nsfw_concept)
236
-
237
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
238
-
239
- def _make_latent_mask(self, latents, mask):
240
- if mask is not None:
241
- latent_mask = []
242
- if not isinstance(mask, list):
243
- tmp_mask = [mask]
244
- else:
245
- tmp_mask = mask
246
- _, l_channels, l_height, l_width = latents.shape
247
- for m in tmp_mask:
248
- if not isinstance(m, PIL.Image.Image):
249
- if len(m.shape) == 2:
250
- m = m[..., np.newaxis]
251
- if m.max() > 1:
252
- m = m / 255.0
253
- m = self.image_processor.numpy_to_pil(m)[0]
254
- if m.mode != "L":
255
- m = m.convert("L")
256
- resized = self.image_processor.resize(m, l_height, l_width)
257
- if self.debug_save:
258
- resized.save("latent_mask.png")
259
- latent_mask.append(np.repeat(np.array(resized)[np.newaxis, :, :], l_channels, axis=0))
260
- latent_mask = torch.as_tensor(np.stack(latent_mask)).to(latents)
261
- latent_mask = latent_mask / latent_mask.max()
262
- return latent_mask
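Note: the masked img2img logic above blends the VAE-encoded source latents with the freshly denoised latents via `torch.lerp`, weighted by the resized mask. Below is a minimal, self-contained sketch of that blending step; the tensor shapes and the 0.18215 scaling factor are illustrative assumptions, not part of the pipeline's API.

```py
import torch

# Toy shapes: 1 image, 4 latent channels, a 64x64 latent grid (illustrative values)
init_latents = torch.randn(1, 4, 64, 64)   # mean of the VAE latent distribution for the source image
gen_latents = torch.randn(1, 4, 64, 64)    # latents coming out of the denoising loop
latent_mask = torch.rand(1, 4, 64, 64)
latent_mask = latent_mask / latent_mask.max()  # normalized to [0, 1], as in _make_latent_mask

scaling_factor = 0.18215  # typical Stable Diffusion VAE scaling factor (assumed here)

# The same blend used inside the denoising loop: 0 keeps the (scaled) source latents,
# 1 keeps the generated latents, values in between interpolate linearly.
blended = torch.lerp(init_latents * scaling_factor, gen_latents, latent_mask)
print(blended.shape)  # torch.Size([1, 4, 64, 64])
```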
mixture_canvas.py DELETED
@@ -1,501 +0,0 @@
1
- import re
2
- from copy import deepcopy
3
- from dataclasses import asdict, dataclass
4
- from enum import Enum
5
- from typing import List, Optional, Union
6
-
7
- import numpy as np
8
- import torch
9
- from numpy import exp, pi, sqrt
10
- from torchvision.transforms.functional import resize
11
- from tqdm.auto import tqdm
12
- from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
13
-
14
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
15
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
16
- from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
17
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
18
-
19
-
20
- def preprocess_image(image):
21
- from PIL import Image
22
-
23
- """Preprocess an input image
24
-
25
- Same as
26
- https://github.com/huggingface/diffusers/blob/1138d63b519e37f0ce04e027b9f4a3261d27c628/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L44
27
- """
28
- w, h = image.size
29
- w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
30
- image = image.resize((w, h), resample=Image.LANCZOS)
31
- image = np.array(image).astype(np.float32) / 255.0
32
- image = image[None].transpose(0, 3, 1, 2)
33
- image = torch.from_numpy(image)
34
- return 2.0 * image - 1.0
35
-
36
-
37
- @dataclass
38
- class CanvasRegion:
39
- """Class defining a rectangular region in the canvas"""
40
-
41
- row_init: int # Region starting row in pixel space (included)
42
- row_end: int # Region end row in pixel space (not included)
43
- col_init: int # Region starting column in pixel space (included)
44
- col_end: int # Region end column in pixel space (not included)
45
- region_seed: int = None # Seed for random operations in this region
46
- noise_eps: float = 0.0 # Deviation of a zero-mean gaussian noise to be applied over the latents in this region. Useful for slightly "rerolling" latents
47
-
48
- def __post_init__(self):
49
- # Initialize arguments if not specified
50
- if self.region_seed is None:
51
- self.region_seed = np.random.randint(9999999999)
52
- # Check coordinates are non-negative
53
- for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
54
- if coord < 0:
55
- raise ValueError(
56
- f"A CanvasRegion must be defined with non-negative indices, found ({self.row_init}, {self.row_end}, {self.col_init}, {self.col_end})"
57
- )
58
- # Check coordinates are divisible by 8, else we end up with nasty rounding error when mapping to latent space
59
- for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
60
- if coord // 8 != coord / 8:
61
- raise ValueError(
62
- f"A CanvasRegion must be defined with locations divisible by 8, found ({self.row_init}-{self.row_end}, {self.col_init}-{self.col_end})"
63
- )
64
- # Check noise eps is non-negative
65
- if self.noise_eps < 0:
66
- raise ValueError(f"A CanvasRegion must be defined with a non-negative noise_eps, found {self.noise_eps}")
67
- # Compute coordinates for this region in latent space
68
- self.latent_row_init = self.row_init // 8
69
- self.latent_row_end = self.row_end // 8
70
- self.latent_col_init = self.col_init // 8
71
- self.latent_col_end = self.col_end // 8
72
-
73
- @property
74
- def width(self):
75
- return self.col_end - self.col_init
76
-
77
- @property
78
- def height(self):
79
- return self.row_end - self.row_init
80
-
81
- def get_region_generator(self, device="cpu"):
82
- """Creates a torch.Generator based on the random seed of this region"""
83
- # Initialize region generator
84
- return torch.Generator(device).manual_seed(self.region_seed)
85
-
86
- @property
87
- def __dict__(self):
88
- return asdict(self)
89
-
90
-
91
- class MaskModes(Enum):
92
- """Modes in which the influence of diffuser is masked"""
93
-
94
- CONSTANT = "constant"
95
- GAUSSIAN = "gaussian"
96
- QUARTIC = "quartic" # See https://en.wikipedia.org/wiki/Kernel_(statistics)
97
-
98
-
99
- @dataclass
100
- class DiffusionRegion(CanvasRegion):
101
- """Abstract class defining a region where some class of diffusion process is acting"""
102
-
103
- pass
104
-
105
-
106
- @dataclass
107
- class Text2ImageRegion(DiffusionRegion):
108
- """Class defining a region where a text guided diffusion process is acting"""
109
-
110
- prompt: str = "" # Text prompt guiding the diffuser in this region
111
- guidance_scale: float = 7.5 # Guidance scale of the diffuser in this region. If None, randomize
112
- mask_type: MaskModes = MaskModes.GAUSSIAN.value # Kind of weight mask applied to this region
113
- mask_weight: float = 1.0 # Global weights multiplier of the mask
114
- tokenized_prompt = None # Tokenized prompt
115
- encoded_prompt = None # Encoded prompt
116
-
117
- def __post_init__(self):
118
- super().__post_init__()
119
- # Mask weight cannot be negative
120
- if self.mask_weight < 0:
121
- raise ValueError(
122
- f"A Text2ImageRegion must be defined with non-negative mask weight, found {self.mask_weight}"
123
- )
124
- # Mask type must be an actual known mask
125
- if self.mask_type not in [e.value for e in MaskModes]:
126
- raise ValueError(
127
- f"A Text2ImageRegion was defined with mask {self.mask_type}, which is not an accepted mask ({[e.value for e in MaskModes]})"
128
- )
129
- # Randomize arguments if given as None
130
- if self.guidance_scale is None:
131
- self.guidance_scale = np.random.randint(5, 30)
132
- # Clean prompt
133
- self.prompt = re.sub(" +", " ", self.prompt).replace("\n", " ")
134
-
135
- def tokenize_prompt(self, tokenizer):
136
- """Tokenizes the prompt for this diffusion region using a given tokenizer"""
137
- self.tokenized_prompt = tokenizer(
138
- self.prompt,
139
- padding="max_length",
140
- max_length=tokenizer.model_max_length,
141
- truncation=True,
142
- return_tensors="pt",
143
- )
144
-
145
- def encode_prompt(self, text_encoder, device):
146
- """Encodes the previously tokenized prompt for this diffusion region using a given encoder"""
147
- assert self.tokenized_prompt is not None, ValueError(
148
- "Prompt in diffusion region must be tokenized before encoding"
149
- )
150
- self.encoded_prompt = text_encoder(self.tokenized_prompt.input_ids.to(device))[0]
151
-
152
-
153
- @dataclass
154
- class Image2ImageRegion(DiffusionRegion):
155
- """Class defining a region where an image guided diffusion process is acting"""
156
-
157
- reference_image: torch.Tensor = None
158
- strength: float = 0.8 # Strength of the image
159
-
160
- def __post_init__(self):
161
- super().__post_init__()
162
- if self.reference_image is None:
163
- raise ValueError("Must provide a reference image when creating an Image2ImageRegion")
164
- if self.strength < 0 or self.strength > 1:
165
- raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {self.strength}")
166
- # Rescale image to region shape
167
- self.reference_image = resize(self.reference_image, size=[self.height, self.width])
168
-
169
- def encode_reference_image(self, encoder, device, generator, cpu_vae=False):
170
- """Encodes the reference image for this Image2Image region into the latent space"""
171
- # Place encoder in CPU or not following the parameter cpu_vae
172
- if cpu_vae:
173
- # Note: here we use the mean instead of sampling, to avoid also having to move the generator to the CPU, which is troublesome
174
- self.reference_latents = encoder.cpu().encode(self.reference_image).latent_dist.mean.to(device)
175
- else:
176
- self.reference_latents = encoder.encode(self.reference_image.to(device)).latent_dist.sample(
177
- generator=generator
178
- )
179
- self.reference_latents = 0.18215 * self.reference_latents
180
-
181
- @property
182
- def __dict__(self):
183
- # This class requires special casting to dict because of the reference_image tensor. Otherwise it cannot be cast to JSON
184
-
185
- # Get all basic fields from parent class
186
- super_fields = {key: getattr(self, key) for key in DiffusionRegion.__dataclass_fields__.keys()}
187
- # Pack other fields
188
- return {**super_fields, "reference_image": self.reference_image.cpu().tolist(), "strength": self.strength}
189
-
190
-
191
- class RerollModes(Enum):
192
- """Modes in which the reroll regions operate"""
193
-
194
- RESET = "reset" # Completely reset the random noise in the region
195
- EPSILON = "epsilon" # Alter slightly the latents in the region
196
-
197
-
198
- @dataclass
199
- class RerollRegion(CanvasRegion):
200
- """Class defining a rectangular canvas region in which initial latent noise will be rerolled"""
201
-
202
- reroll_mode: RerollModes = RerollModes.RESET.value
203
-
204
-
205
- @dataclass
206
- class MaskWeightsBuilder:
207
- """Auxiliary class to compute a tensor of weights for a given diffusion region"""
208
-
209
- latent_space_dim: int # Size of the U-net latent space
210
- nbatch: int = 1 # Batch size in the U-net
211
-
212
- def compute_mask_weights(self, region: DiffusionRegion) -> torch.tensor:
213
- """Computes a tensor of weights for a given diffusion region"""
214
- MASK_BUILDERS = {
215
- MaskModes.CONSTANT.value: self._constant_weights,
216
- MaskModes.GAUSSIAN.value: self._gaussian_weights,
217
- MaskModes.QUARTIC.value: self._quartic_weights,
218
- }
219
- return MASK_BUILDERS[region.mask_type](region)
220
-
221
- def _constant_weights(self, region: DiffusionRegion) -> torch.tensor:
222
- """Computes a tensor of constant for a given diffusion region"""
223
- latent_width = region.latent_col_end - region.latent_col_init
224
- latent_height = region.latent_row_end - region.latent_row_init
225
- return torch.ones(self.nbatch, self.latent_space_dim, latent_height, latent_width) * region.mask_weight
226
-
227
- def _gaussian_weights(self, region: DiffusionRegion) -> torch.tensor:
228
- """Generates a gaussian mask of weights for tile contributions"""
229
- latent_width = region.latent_col_end - region.latent_col_init
230
- latent_height = region.latent_row_end - region.latent_row_init
231
-
232
- var = 0.01
233
- midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1
234
- x_probs = [
235
- exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
236
- for x in range(latent_width)
237
- ]
238
- midpoint = (latent_height - 1) / 2
239
- y_probs = [
240
- exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
241
- for y in range(latent_height)
242
- ]
243
-
244
- weights = np.outer(y_probs, x_probs) * region.mask_weight
245
- return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))
246
-
247
- def _quartic_weights(self, region: DiffusionRegion) -> torch.tensor:
248
- """Generates a quartic mask of weights for tile contributions
249
-
250
- The quartic kernel has bounded support over the diffusion region, and a smooth decay to the region limits.
251
- """
252
- quartic_constant = 15.0 / 16.0
253
-
254
- support = (np.array(range(region.latent_col_init, region.latent_col_end)) - region.latent_col_init) / (
255
- region.latent_col_end - region.latent_col_init - 1
256
- ) * 1.99 - (1.99 / 2.0)
257
- x_probs = quartic_constant * np.square(1 - np.square(support))
258
- support = (np.array(range(region.latent_row_init, region.latent_row_end)) - region.latent_row_init) / (
259
- region.latent_row_end - region.latent_row_init - 1
260
- ) * 1.99 - (1.99 / 2.0)
261
- y_probs = quartic_constant * np.square(1 - np.square(support))
262
-
263
- weights = np.outer(y_probs, x_probs) * region.mask_weight
264
- return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))
265
-
266
-
267
- class StableDiffusionCanvasPipeline(DiffusionPipeline, StableDiffusionMixin):
268
- """Stable Diffusion pipeline that mixes several diffusers in the same canvas"""
269
-
270
- def __init__(
271
- self,
272
- vae: AutoencoderKL,
273
- text_encoder: CLIPTextModel,
274
- tokenizer: CLIPTokenizer,
275
- unet: UNet2DConditionModel,
276
- scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
277
- safety_checker: StableDiffusionSafetyChecker,
278
- feature_extractor: CLIPFeatureExtractor,
279
- ):
280
- super().__init__()
281
- self.register_modules(
282
- vae=vae,
283
- text_encoder=text_encoder,
284
- tokenizer=tokenizer,
285
- unet=unet,
286
- scheduler=scheduler,
287
- safety_checker=safety_checker,
288
- feature_extractor=feature_extractor,
289
- )
290
-
291
- def decode_latents(self, latents, cpu_vae=False):
292
- """Decodes a given array of latents into pixel space"""
293
- # scale and decode the image latents with vae
294
- if cpu_vae:
295
- lat = deepcopy(latents).cpu()
296
- vae = deepcopy(self.vae).cpu()
297
- else:
298
- lat = latents
299
- vae = self.vae
300
-
301
- lat = 1 / 0.18215 * lat
302
- image = vae.decode(lat).sample
303
-
304
- image = (image / 2 + 0.5).clamp(0, 1)
305
- image = image.cpu().permute(0, 2, 3, 1).numpy()
306
-
307
- return self.numpy_to_pil(image)
308
-
309
- def get_latest_timestep_img2img(self, num_inference_steps, strength):
310
- """Finds the latest timesteps where an img2img strength does not impose latents anymore"""
311
- # get the original timestep using init_timestep
312
- offset = self.scheduler.config.get("steps_offset", 0)
313
- init_timestep = int(num_inference_steps * (1 - strength)) + offset
314
- init_timestep = min(init_timestep, num_inference_steps)
315
-
316
- t_start = min(max(num_inference_steps - init_timestep + offset, 0), num_inference_steps - 1)
317
- latest_timestep = self.scheduler.timesteps[t_start]
318
-
319
- return latest_timestep
320
-
321
- @torch.no_grad()
322
- def __call__(
323
- self,
324
- canvas_height: int,
325
- canvas_width: int,
326
- regions: List[DiffusionRegion],
327
- num_inference_steps: Optional[int] = 50,
328
- seed: Optional[int] = 12345,
329
- reroll_regions: Optional[List[RerollRegion]] = None,
330
- cpu_vae: Optional[bool] = False,
331
- decode_steps: Optional[bool] = False,
332
- ):
333
- if reroll_regions is None:
334
- reroll_regions = []
335
- batch_size = 1
336
-
337
- if decode_steps:
338
- steps_images = []
339
-
340
- # Prepare scheduler
341
- self.scheduler.set_timesteps(num_inference_steps, device=self.device)
342
-
343
- # Split diffusion regions by their kind
344
- text2image_regions = [region for region in regions if isinstance(region, Text2ImageRegion)]
345
- image2image_regions = [region for region in regions if isinstance(region, Image2ImageRegion)]
346
-
347
- # Prepare text embeddings
348
- for region in text2image_regions:
349
- region.tokenize_prompt(self.tokenizer)
350
- region.encode_prompt(self.text_encoder, self.device)
351
-
352
- # Create original noisy latents using the timesteps
353
- latents_shape = (batch_size, self.unet.config.in_channels, canvas_height // 8, canvas_width // 8)
354
- generator = torch.Generator(self.device).manual_seed(seed)
355
- init_noise = torch.randn(latents_shape, generator=generator, device=self.device)
356
-
357
- # Reset latents in seed reroll regions, if requested
358
- for region in reroll_regions:
359
- if region.reroll_mode == RerollModes.RESET.value:
360
- region_shape = (
361
- latents_shape[0],
362
- latents_shape[1],
363
- region.latent_row_end - region.latent_row_init,
364
- region.latent_col_end - region.latent_col_init,
365
- )
366
- init_noise[
367
- :,
368
- :,
369
- region.latent_row_init : region.latent_row_end,
370
- region.latent_col_init : region.latent_col_end,
371
- ] = torch.randn(region_shape, generator=region.get_region_generator(self.device), device=self.device)
372
-
373
- # Apply epsilon noise to regions: first diffusion regions, then reroll regions
374
- all_eps_rerolls = regions + [r for r in reroll_regions if r.reroll_mode == RerollModes.EPSILON.value]
375
- for region in all_eps_rerolls:
376
- if region.noise_eps > 0:
377
- region_noise = init_noise[
378
- :,
379
- :,
380
- region.latent_row_init : region.latent_row_end,
381
- region.latent_col_init : region.latent_col_end,
382
- ]
383
- eps_noise = (
384
- torch.randn(
385
- region_noise.shape, generator=region.get_region_generator(self.device), device=self.device
386
- )
387
- * region.noise_eps
388
- )
389
- init_noise[
390
- :,
391
- :,
392
- region.latent_row_init : region.latent_row_end,
393
- region.latent_col_init : region.latent_col_end,
394
- ] += eps_noise
395
-
396
- # scale the initial noise by the standard deviation required by the scheduler
397
- latents = init_noise * self.scheduler.init_noise_sigma
398
-
399
- # Get unconditional embeddings for classifier free guidance in text2image regions
400
- for region in text2image_regions:
401
- max_length = region.tokenized_prompt.input_ids.shape[-1]
402
- uncond_input = self.tokenizer(
403
- [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
404
- )
405
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
406
-
407
- # For classifier free guidance, we need to do two forward passes.
408
- # Here we concatenate the unconditional and text embeddings into a single batch
409
- # to avoid doing two forward passes
410
- region.encoded_prompt = torch.cat([uncond_embeddings, region.encoded_prompt])
411
-
412
- # Prepare image latents
413
- for region in image2image_regions:
414
- region.encode_reference_image(self.vae, device=self.device, generator=generator)
415
-
416
- # Prepare mask of weights for each region
417
- mask_builder = MaskWeightsBuilder(latent_space_dim=self.unet.config.in_channels, nbatch=batch_size)
418
- mask_weights = [mask_builder.compute_mask_weights(region).to(self.device) for region in text2image_regions]
419
-
420
- # Diffusion timesteps
421
- for i, t in tqdm(enumerate(self.scheduler.timesteps)):
422
- # Diffuse each region
423
- noise_preds_regions = []
424
-
425
- # text2image regions
426
- for region in text2image_regions:
427
- region_latents = latents[
428
- :,
429
- :,
430
- region.latent_row_init : region.latent_row_end,
431
- region.latent_col_init : region.latent_col_end,
432
- ]
433
- # expand the latents if we are doing classifier free guidance
434
- latent_model_input = torch.cat([region_latents] * 2)
435
- # scale model input following scheduler rules
436
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
437
- # predict the noise residual
438
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=region.encoded_prompt)["sample"]
439
- # perform guidance
440
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
441
- noise_pred_region = noise_pred_uncond + region.guidance_scale * (noise_pred_text - noise_pred_uncond)
442
- noise_preds_regions.append(noise_pred_region)
443
-
444
- # Merge noise predictions for all tiles
445
- noise_pred = torch.zeros(latents.shape, device=self.device)
446
- contributors = torch.zeros(latents.shape, device=self.device)
447
- # Add each tile contribution to overall latents
448
- for region, noise_pred_region, mask_weights_region in zip(
449
- text2image_regions, noise_preds_regions, mask_weights
450
- ):
451
- noise_pred[
452
- :,
453
- :,
454
- region.latent_row_init : region.latent_row_end,
455
- region.latent_col_init : region.latent_col_end,
456
- ] += noise_pred_region * mask_weights_region
457
- contributors[
458
- :,
459
- :,
460
- region.latent_row_init : region.latent_row_end,
461
- region.latent_col_init : region.latent_col_end,
462
- ] += mask_weights_region
463
- # Average overlapping areas with more than 1 contributor
464
- noise_pred /= contributors
465
- noise_pred = torch.nan_to_num(
466
- noise_pred
467
- ) # Replace NaNs by zeros: NaN can appear if a position is not covered by any DiffusionRegion
468
-
469
- # compute the previous noisy sample x_t -> x_t-1
470
- latents = self.scheduler.step(noise_pred, t, latents).prev_sample
471
-
472
- # Image2Image regions: override latents generated by the scheduler
473
- for region in image2image_regions:
474
- influence_step = self.get_latest_timestep_img2img(num_inference_steps, region.strength)
475
- # Only override in the timesteps before the last influence step of the image (given by its strength)
476
- if t > influence_step:
477
- timestep = t.repeat(batch_size)
478
- region_init_noise = init_noise[
479
- :,
480
- :,
481
- region.latent_row_init : region.latent_row_end,
482
- region.latent_col_init : region.latent_col_end,
483
- ]
484
- region_latents = self.scheduler.add_noise(region.reference_latents, region_init_noise, timestep)
485
- latents[
486
- :,
487
- :,
488
- region.latent_row_init : region.latent_row_end,
489
- region.latent_col_init : region.latent_col_end,
490
- ] = region_latents
491
-
492
- if decode_steps:
493
- steps_images.append(self.decode_latents(latents, cpu_vae))
494
-
495
- # scale and decode the image latents with vae
496
- image = self.decode_latents(latents, cpu_vae)
497
-
498
- output = {"images": image}
499
- if decode_steps:
500
- output = {**output, "steps_images": steps_images}
501
- return output
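The core of the canvas pipeline above is the merge step: each region's noise prediction is pasted into a full-canvas tensor weighted by its mask, a parallel tensor accumulates the weights, and dividing the two averages the overlaps. A self-contained sketch of that merge with toy tensors follows; the shapes and region coordinates are illustrative assumptions.

```py
import torch

latents_shape = (1, 4, 64, 96)          # toy canvas: 1 image, 4 latent channels, 64x96 latents
noise_pred = torch.zeros(latents_shape)
contributors = torch.zeros(latents_shape)

# Two horizontally overlapping regions, given as (row slice, column slice) in latent space
regions = [(slice(0, 64), slice(0, 64)), (slice(0, 64), slice(40, 96))]
for rows, cols in regions:
    h, w = rows.stop - rows.start, cols.stop - cols.start
    region_pred = torch.randn(1, 4, h, w)   # stand-in for this region's UNet noise prediction
    mask_weights = torch.ones(1, 4, h, w)   # stand-in for the gaussian/constant region mask
    noise_pred[:, :, rows, cols] += region_pred * mask_weights
    contributors[:, :, rows, cols] += mask_weights

# Average overlapping areas and zero out uncovered positions (NaNs), as the pipeline does
noise_pred = torch.nan_to_num(noise_pred / contributors)
```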
mixture_tiling.py DELETED
@@ -1,405 +0,0 @@
1
- import inspect
2
- from copy import deepcopy
3
- from enum import Enum
4
- from typing import List, Optional, Tuple, Union
5
-
6
- import torch
7
- from tqdm.auto import tqdm
8
-
9
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
10
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline
11
- from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
12
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
13
- from diffusers.utils import logging
14
-
15
-
16
- try:
17
- from ligo.segments import segment
18
- from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
19
- except ImportError:
20
- raise ImportError("Please install transformers and ligo-segments to use the mixture pipeline")
21
-
22
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
23
-
24
- EXAMPLE_DOC_STRING = """
25
- Examples:
26
- ```py
27
- >>> from diffusers import LMSDiscreteScheduler, DiffusionPipeline
28
-
29
- >>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
30
- >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_tiling")
31
- >>> pipeline.to("cuda")
32
-
33
- >>> image = pipeline(
34
- >>> prompt=[[
35
- >>> "A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
36
- >>> "A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
37
- >>> "An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece"
38
- >>> ]],
39
- >>> tile_height=640,
40
- >>> tile_width=640,
41
- >>> tile_row_overlap=0,
42
- >>> tile_col_overlap=256,
43
- >>> guidance_scale=8,
44
- >>> seed=7178915308,
45
- >>> num_inference_steps=50,
46
- >>> )["images"][0]
47
- ```
48
- """
49
-
50
-
51
- def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
52
- """Given a tile's row and column numbers, returns the range of pixels affected by that tile in the overall image
53
-
54
- Returns a tuple with:
55
- - Starting coordinates of rows in pixel space
56
- - Ending coordinates of rows in pixel space
57
- - Starting coordinates of columns in pixel space
58
- - Ending coordinates of columns in pixel space
59
- """
60
- px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
61
- px_row_end = px_row_init + tile_height
62
- px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
63
- px_col_end = px_col_init + tile_width
64
- return px_row_init, px_row_end, px_col_init, px_col_end
65
-
66
-
67
- def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end):
68
- """Translates coordinates in pixel space to coordinates in latent space"""
69
- return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8
70
-
71
-
72
- def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
73
- """Given a tile's row and column numbers, returns the range of latents affected by that tile in the overall image
74
-
75
- Returns a tuple with:
76
- - Starting coordinates of rows in latent space
77
- - Ending coordinates of rows in latent space
78
- - Starting coordinates of columns in latent space
79
- - Ending coordinates of columns in latent space
80
- """
81
- px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
82
- tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
83
- )
84
- return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end)
85
-
86
-
87
- def _tile2latent_exclusive_indices(
88
- tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns
89
- ):
90
- """Given a tile's row and column numbers, returns the range of latents affected only by that tile in the overall image
91
-
92
- Returns a tuple with:
93
- - Starting coordinates of rows in latent space
94
- - Ending coordinates of rows in latent space
95
- - Starting coordinates of columns in latent space
96
- - Ending coordinates of columns in latent space
97
- """
98
- row_init, row_end, col_init, col_end = _tile2latent_indices(
99
- tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
100
- )
101
- row_segment = segment(row_init, row_end)
102
- col_segment = segment(col_init, col_end)
103
- # Iterate over the rest of tiles, clipping the region for the current tile
104
- for row in range(rows):
105
- for column in range(columns):
106
- if row != tile_row and column != tile_col:
107
- clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices(
108
- row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap
109
- )
110
- row_segment = row_segment - segment(clip_row_init, clip_row_end)
111
- col_segment = col_segment - segment(clip_col_init, clip_col_end)
112
- # return row_init, row_end, col_init, col_end
113
- return row_segment[0], row_segment[1], col_segment[0], col_segment[1]
114
-
115
-
116
- class StableDiffusionExtrasMixin:
117
- """Mixin providing additional convenience method to Stable Diffusion pipelines"""
118
-
119
- def decode_latents(self, latents, cpu_vae=False):
120
- """Decodes a given array of latents into pixel space"""
121
- # scale and decode the image latents with vae
122
- if cpu_vae:
123
- lat = deepcopy(latents).cpu()
124
- vae = deepcopy(self.vae).cpu()
125
- else:
126
- lat = latents
127
- vae = self.vae
128
-
129
- lat = 1 / 0.18215 * lat
130
- image = vae.decode(lat).sample
131
-
132
- image = (image / 2 + 0.5).clamp(0, 1)
133
- image = image.cpu().permute(0, 2, 3, 1).numpy()
134
-
135
- return self.numpy_to_pil(image)
136
-
137
-
138
- class StableDiffusionTilingPipeline(DiffusionPipeline, StableDiffusionExtrasMixin):
139
- def __init__(
140
- self,
141
- vae: AutoencoderKL,
142
- text_encoder: CLIPTextModel,
143
- tokenizer: CLIPTokenizer,
144
- unet: UNet2DConditionModel,
145
- scheduler: Union[DDIMScheduler, PNDMScheduler],
146
- safety_checker: StableDiffusionSafetyChecker,
147
- feature_extractor: CLIPFeatureExtractor,
148
- ):
149
- super().__init__()
150
- self.register_modules(
151
- vae=vae,
152
- text_encoder=text_encoder,
153
- tokenizer=tokenizer,
154
- unet=unet,
155
- scheduler=scheduler,
156
- safety_checker=safety_checker,
157
- feature_extractor=feature_extractor,
158
- )
159
-
160
- class SeedTilesMode(Enum):
161
- """Modes in which the latents of a particular tile can be re-seeded"""
162
-
163
- FULL = "full"
164
- EXCLUSIVE = "exclusive"
165
-
166
- @torch.no_grad()
167
- def __call__(
168
- self,
169
- prompt: Union[str, List[List[str]]],
170
- num_inference_steps: Optional[int] = 50,
171
- guidance_scale: Optional[float] = 7.5,
172
- eta: Optional[float] = 0.0,
173
- seed: Optional[int] = None,
174
- tile_height: Optional[int] = 512,
175
- tile_width: Optional[int] = 512,
176
- tile_row_overlap: Optional[int] = 256,
177
- tile_col_overlap: Optional[int] = 256,
178
- guidance_scale_tiles: Optional[List[List[float]]] = None,
179
- seed_tiles: Optional[List[List[int]]] = None,
180
- seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
181
- seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
182
- cpu_vae: Optional[bool] = False,
183
- ):
184
- r"""
185
- Function to run the diffusion pipeline with tiling support.
186
-
187
- Args:
188
- prompt: a list of lists with all the prompts to use (one list for each row of tiles). This also defines the tiling structure.
189
- num_inference_steps: number of diffusion steps.
190
- guidance_scale: classifier-free guidance scale.
191
- seed: general random seed to initialize latents.
192
- tile_height: height in pixels of each grid tile.
193
- tile_width: width in pixels of each grid tile.
194
- tile_row_overlap: number of overlap pixels between tiles in consecutive rows.
195
- tile_col_overlap: number of overlap pixels between tiles in consecutive columns.
197
- guidance_scale_tiles: specific weights for classifier-free guidance in each tile. If None, the value provided in guidance_scale will be used.
198
- seed_tiles: specific seeds for the initialization latents in each tile. These will override the latents generated for the whole canvas using the standard seed parameter.
199
- seed_tiles_mode: either "full" or "exclusive". If "full", all the latents affected by the tile will be overridden. If "exclusive", only the latents that are affected exclusively by this tile (and no other tiles) will be overridden.
200
- seed_reroll_regions: a list of tuples in the form (start row, end row, start column, end column, seed) defining regions in pixel space for which the latents will be overriden using the given seed. Takes priority over seed_tiles.
201
- cpu_vae: the decoder from latent space to pixel space can require too much GPU RAM for large images. If you run into out-of-memory errors at the end of the generation process, try setting this parameter to True to run the decoder on the CPU. Slower, but it should run without memory issues.
202
-
203
- Examples:
204
-
205
- Returns:
206
- A dictionary with the generated PIL images under the "images" key.
207
-
208
- """
209
- if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt):
210
- raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}")
211
- grid_rows = len(prompt)
212
- grid_cols = len(prompt[0])
213
- if not all(len(row) == grid_cols for row in prompt):
214
- raise ValueError("All prompt rows must have the same number of prompt columns")
215
- if not isinstance(seed_tiles_mode, str) and (
216
- not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode)
217
- ):
218
- raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(seed_tiles_mode)}")
219
- if isinstance(seed_tiles_mode, str):
220
- seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]
221
-
222
- modes = [mode.value for mode in self.SeedTilesMode]
223
- if any(mode not in modes for row in seed_tiles_mode for mode in row):
224
- raise ValueError(f"Seed tiles mode must be one of {modes}")
225
- if seed_reroll_regions is None:
226
- seed_reroll_regions = []
227
- batch_size = 1
228
-
229
- # create original noisy latents using the timesteps
230
- height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
231
- width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
232
- latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
233
- generator = torch.Generator("cuda").manual_seed(seed)
234
- latents = torch.randn(latents_shape, generator=generator, device=self.device)
235
-
236
- # overwrite latents for specific tiles if provided
237
- if seed_tiles is not None:
238
- for row in range(grid_rows):
239
- for col in range(grid_cols):
240
- if (seed_tile := seed_tiles[row][col]) is not None:
241
- mode = seed_tiles_mode[row][col]
242
- if mode == self.SeedTilesMode.FULL.value:
243
- row_init, row_end, col_init, col_end = _tile2latent_indices(
244
- row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
245
- )
246
- else:
247
- row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(
248
- row,
249
- col,
250
- tile_width,
251
- tile_height,
252
- tile_row_overlap,
253
- tile_col_overlap,
254
- grid_rows,
255
- grid_cols,
256
- )
257
- tile_generator = torch.Generator("cuda").manual_seed(seed_tile)
258
- tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
259
- latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
260
- tile_shape, generator=tile_generator, device=self.device
261
- )
262
-
263
- # overwrite again for seed reroll regions
264
- for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
265
- row_init, row_end, col_init, col_end = _pixel2latent_indices(
266
- row_init, row_end, col_init, col_end
267
- ) # to latent space coordinates
268
- reroll_generator = torch.Generator("cuda").manual_seed(seed_reroll)
269
- region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
270
- latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
271
- region_shape, generator=reroll_generator, device=self.device
272
- )
273
-
274
- # Prepare scheduler
275
- accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
276
- extra_set_kwargs = {}
277
- if accepts_offset:
278
- extra_set_kwargs["offset"] = 1
279
- self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
280
- # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
281
- if isinstance(self.scheduler, LMSDiscreteScheduler):
282
- latents = latents * self.scheduler.sigmas[0]
283
-
284
- # get prompts text embeddings
285
- text_input = [
286
- [
287
- self.tokenizer(
288
- col,
289
- padding="max_length",
290
- max_length=self.tokenizer.model_max_length,
291
- truncation=True,
292
- return_tensors="pt",
293
- )
294
- for col in row
295
- ]
296
- for row in prompt
297
- ]
298
- text_embeddings = [[self.text_encoder(col.input_ids.to(self.device))[0] for col in row] for row in text_input]
299
-
300
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
301
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
302
- # corresponds to doing no classifier free guidance.
303
- do_classifier_free_guidance = guidance_scale > 1.0 # TODO: also active if any tile has guidance scale
304
- # get unconditional embeddings for classifier free guidance
305
- if do_classifier_free_guidance:
306
- for i in range(grid_rows):
307
- for j in range(grid_cols):
308
- max_length = text_input[i][j].input_ids.shape[-1]
309
- uncond_input = self.tokenizer(
310
- [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
311
- )
312
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
313
-
314
- # For classifier free guidance, we need to do two forward passes.
315
- # Here we concatenate the unconditional and text embeddings into a single batch
316
- # to avoid doing two forward passes
317
- text_embeddings[i][j] = torch.cat([uncond_embeddings, text_embeddings[i][j]])
318
-
319
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
320
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
321
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
322
- # and should be between [0, 1]
323
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
324
- extra_step_kwargs = {}
325
- if accepts_eta:
326
- extra_step_kwargs["eta"] = eta
327
-
328
- # Mask for tile weights strength
329
- tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size)
330
-
331
- # Diffusion timesteps
332
- for i, t in tqdm(enumerate(self.scheduler.timesteps)):
333
- # Diffuse each tile
334
- noise_preds = []
335
- for row in range(grid_rows):
336
- noise_preds_row = []
337
- for col in range(grid_cols):
338
- px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
339
- row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
340
- )
341
- tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
342
- # expand the latents if we are doing classifier free guidance
343
- latent_model_input = torch.cat([tile_latents] * 2) if do_classifier_free_guidance else tile_latents
344
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
345
- # predict the noise residual
346
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings[row][col])[
347
- "sample"
348
- ]
349
- # perform guidance
350
- if do_classifier_free_guidance:
351
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
352
- guidance = (
353
- guidance_scale
354
- if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None
355
- else guidance_scale_tiles[row][col]
356
- )
357
- noise_pred_tile = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
358
- noise_preds_row.append(noise_pred_tile)
359
- noise_preds.append(noise_preds_row)
360
- # Stitch noise predictions for all tiles
361
- noise_pred = torch.zeros(latents.shape, device=self.device)
362
- contributors = torch.zeros(latents.shape, device=self.device)
363
- # Add each tile contribution to overall latents
364
- for row in range(grid_rows):
365
- for col in range(grid_cols):
366
- px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
367
- row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
368
- )
369
- noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
370
- noise_preds[row][col] * tile_weights
371
- )
372
- contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights
373
- # Average overlapping areas with more than 1 contributor
374
- noise_pred /= contributors
375
-
376
- # compute the previous noisy sample x_t -> x_t-1
377
- latents = self.scheduler.step(noise_pred, t, latents).prev_sample
378
-
379
- # scale and decode the image latents with vae
380
- image = self.decode_latents(latents, cpu_vae)
381
-
382
- return {"images": image}
383
-
384
- def _gaussian_weights(self, tile_width, tile_height, nbatches):
385
- """Generates a gaussian mask of weights for tile contributions"""
386
- import numpy as np
387
- from numpy import exp, pi, sqrt
388
-
389
- latent_width = tile_width // 8
390
- latent_height = tile_height // 8
391
-
392
- var = 0.01
393
- midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1
394
- x_probs = [
395
- exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
396
- for x in range(latent_width)
397
- ]
398
- midpoint = latent_height / 2
399
- y_probs = [
400
- exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
401
- for y in range(latent_height)
402
- ]
403
-
404
- weights = np.outer(y_probs, x_probs)
405
- return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))
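For intuition about the tiling geometry, here is a small worked sketch of the index arithmetic implemented by `_tile2pixel_indices` and `_pixel2latent_indices` above; the tile sizes and overlaps are simply the values from the usage example in the docstring, and the helper name is local to this sketch.

```py
def tile2pixel(tile_row, tile_col, tile_width, tile_height, row_overlap, col_overlap):
    # Same arithmetic as _tile2pixel_indices: each new tile advances by (tile size - overlap)
    px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - row_overlap)
    px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - col_overlap)
    return px_row_init, px_row_init + tile_height, px_col_init, px_col_init + tile_width

# A 1x3 grid of 640x640 tiles with 256 px of column overlap, as in the docstring example
for col in range(3):
    r0, r1, c0, c1 = tile2pixel(0, col, 640, 640, 0, 256)
    print(f"tile (0, {col}): pixel cols {c0}-{c1}, latent cols {c0 // 8}-{c1 // 8}")
# -> columns 0-640, 384-1024, 768-1408; total canvas width = 640 + 2 * (640 - 256) = 1408 px
```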
multilingual_stable_diffusion.py DELETED
@@ -1,410 +0,0 @@
1
- import inspect
2
- from typing import Callable, List, Optional, Union
3
-
4
- import torch
5
- from transformers import (
6
- CLIPImageProcessor,
7
- CLIPTextModel,
8
- CLIPTokenizer,
9
- MBart50TokenizerFast,
10
- MBartForConditionalGeneration,
11
- pipeline,
12
- )
13
-
14
- from diffusers.configuration_utils import FrozenDict
15
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
16
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
17
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
18
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
19
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
20
- from diffusers.utils import deprecate, logging
21
-
22
-
23
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
24
-
25
-
26
- def detect_language(pipe, prompt, batch_size):
27
- """helper function to detect language(s) of prompt"""
28
-
29
- if batch_size == 1:
30
- preds = pipe(prompt, top_k=1, truncation=True, max_length=128)
31
- return preds[0]["label"]
32
- else:
33
- detected_languages = []
34
- for p in prompt:
35
- preds = pipe(p, top_k=1, truncation=True, max_length=128)
36
- detected_languages.append(preds[0]["label"])
37
-
38
- return detected_languages
39
-
40
-
41
- def translate_prompt(prompt, translation_tokenizer, translation_model, device):
42
- """helper function to translate prompt to English"""
43
-
44
- encoded_prompt = translation_tokenizer(prompt, return_tensors="pt").to(device)
45
- generated_tokens = translation_model.generate(**encoded_prompt, max_new_tokens=1000)
46
- en_trans = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
47
-
48
- return en_trans[0]
49
-
50
-
51
- class MultilingualStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
52
- r"""
53
- Pipeline for text-to-image generation using Stable Diffusion in different languages.
54
-
55
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
56
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
57
-
58
- Args:
59
- detection_pipeline ([`pipeline`]):
60
- Transformers pipeline to detect prompt's language.
61
- translation_model ([`MBartForConditionalGeneration`]):
62
- Model to translate prompt to English, if necessary. Please refer to the
63
- [model card](https://huggingface.co/docs/transformers/model_doc/mbart) for details.
64
- translation_tokenizer ([`MBart50TokenizerFast`]):
65
- Tokenizer of the translation model.
66
- vae ([`AutoencoderKL`]):
67
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
68
- text_encoder ([`CLIPTextModel`]):
69
- Frozen text-encoder. Stable Diffusion uses the text portion of
70
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
71
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
72
- tokenizer (`CLIPTokenizer`):
73
- Tokenizer of class
74
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
75
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
76
- scheduler ([`SchedulerMixin`]):
77
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
78
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
79
- safety_checker ([`StableDiffusionSafetyChecker`]):
80
- Classification module that estimates whether generated images could be considered offensive or harmful.
81
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
82
- feature_extractor ([`CLIPImageProcessor`]):
83
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
84
- """
85
-
86
- def __init__(
87
- self,
88
- detection_pipeline: pipeline,
89
- translation_model: MBartForConditionalGeneration,
90
- translation_tokenizer: MBart50TokenizerFast,
91
- vae: AutoencoderKL,
92
- text_encoder: CLIPTextModel,
93
- tokenizer: CLIPTokenizer,
94
- unet: UNet2DConditionModel,
95
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
96
- safety_checker: StableDiffusionSafetyChecker,
97
- feature_extractor: CLIPImageProcessor,
98
- ):
99
- super().__init__()
100
-
101
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
102
- deprecation_message = (
103
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
104
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
105
- "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
106
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
107
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
108
- " file"
109
- )
110
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
111
- new_config = dict(scheduler.config)
112
- new_config["steps_offset"] = 1
113
- scheduler._internal_dict = FrozenDict(new_config)
114
-
115
- if safety_checker is None:
116
- logger.warning(
117
- " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
118
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
119
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
120
- " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
121
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
122
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
123
- )
124
-
125
- self.register_modules(
126
- detection_pipeline=detection_pipeline,
127
- translation_model=translation_model,
128
- translation_tokenizer=translation_tokenizer,
129
- vae=vae,
130
- text_encoder=text_encoder,
131
- tokenizer=tokenizer,
132
- unet=unet,
133
- scheduler=scheduler,
134
- safety_checker=safety_checker,
135
- feature_extractor=feature_extractor,
136
- )
137
-
138
- @torch.no_grad()
139
- def __call__(
140
- self,
141
- prompt: Union[str, List[str]],
142
- height: int = 512,
143
- width: int = 512,
144
- num_inference_steps: int = 50,
145
- guidance_scale: float = 7.5,
146
- negative_prompt: Optional[Union[str, List[str]]] = None,
147
- num_images_per_prompt: Optional[int] = 1,
148
- eta: float = 0.0,
149
- generator: Optional[torch.Generator] = None,
150
- latents: Optional[torch.Tensor] = None,
151
- output_type: Optional[str] = "pil",
152
- return_dict: bool = True,
153
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
154
- callback_steps: int = 1,
155
- **kwargs,
156
- ):
157
- r"""
158
- Function invoked when calling the pipeline for generation.
159
-
160
- Args:
161
- prompt (`str` or `List[str]`):
162
- The prompt or prompts to guide the image generation. Can be in different languages.
163
- height (`int`, *optional*, defaults to 512):
164
- The height in pixels of the generated image.
165
- width (`int`, *optional*, defaults to 512):
166
- The width in pixels of the generated image.
167
- num_inference_steps (`int`, *optional*, defaults to 50):
168
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
169
- expense of slower inference.
170
- guidance_scale (`float`, *optional*, defaults to 7.5):
171
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
172
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
173
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
174
- 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
175
- usually at the expense of lower image quality.
176
- negative_prompt (`str` or `List[str]`, *optional*):
177
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
178
- if `guidance_scale` is less than `1`).
179
- num_images_per_prompt (`int`, *optional*, defaults to 1):
180
- The number of images to generate per prompt.
181
- eta (`float`, *optional*, defaults to 0.0):
182
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
183
- [`schedulers.DDIMScheduler`], will be ignored for others.
184
- generator (`torch.Generator`, *optional*):
185
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
186
- deterministic.
187
- latents (`torch.Tensor`, *optional*):
188
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
189
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
190
- tensor will be generated by sampling using the supplied random `generator`.
191
- output_type (`str`, *optional*, defaults to `"pil"`):
192
- The output format of the generated image. Choose between
193
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
194
- return_dict (`bool`, *optional*, defaults to `True`):
195
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
196
- plain tuple.
197
- callback (`Callable`, *optional*):
198
- A function that will be called every `callback_steps` steps during inference. The function will be
199
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
200
- callback_steps (`int`, *optional*, defaults to 1):
201
- The frequency at which the `callback` function will be called. If not specified, the callback will be
202
- called at every step.
203
-
204
- Returns:
205
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
206
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
207
- When returning a tuple, the first element is a list with the generated images, and the second element is a
208
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
209
- (nsfw) content, according to the `safety_checker`.
210
- """
211
- if isinstance(prompt, str):
212
- batch_size = 1
213
- elif isinstance(prompt, list):
214
- batch_size = len(prompt)
215
- else:
216
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
217
-
218
- if height % 8 != 0 or width % 8 != 0:
219
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
220
-
221
- if (callback_steps is None) or (
222
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
223
- ):
224
- raise ValueError(
225
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
226
- f" {type(callback_steps)}."
227
- )
228
-
229
- # detect language and translate if necessary
230
- prompt_language = detect_language(self.detection_pipeline, prompt, batch_size)
231
- if batch_size == 1 and prompt_language != "en":
232
- prompt = translate_prompt(prompt, self.translation_tokenizer, self.translation_model, self.device)
233
-
234
- if isinstance(prompt, list):
235
- for index in range(batch_size):
236
- if prompt_language[index] != "en":
237
- p = translate_prompt(
238
- prompt[index], self.translation_tokenizer, self.translation_model, self.device
239
- )
240
- prompt[index] = p
241
-
242
- # get prompt text embeddings
243
- text_inputs = self.tokenizer(
244
- prompt,
245
- padding="max_length",
246
- max_length=self.tokenizer.model_max_length,
247
- return_tensors="pt",
248
- )
249
- text_input_ids = text_inputs.input_ids
250
-
251
- if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
252
- removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
253
- logger.warning(
254
- "The following part of your input was truncated because CLIP can only handle sequences up to"
255
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
256
- )
257
- text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
258
- text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
259
-
260
- # duplicate text embeddings for each generation per prompt, using mps friendly method
261
- bs_embed, seq_len, _ = text_embeddings.shape
262
- text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
263
- text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
264
-
265
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
266
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
267
- # corresponds to doing no classifier free guidance.
268
- do_classifier_free_guidance = guidance_scale > 1.0
269
- # get unconditional embeddings for classifier free guidance
270
- if do_classifier_free_guidance:
271
- uncond_tokens: List[str]
272
- if negative_prompt is None:
273
- uncond_tokens = [""] * batch_size
274
- elif type(prompt) is not type(negative_prompt):
275
- raise TypeError(
276
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
277
- f" {type(prompt)}."
278
- )
279
- elif isinstance(negative_prompt, str):
280
- # detect language and translate it if necessary
281
- negative_prompt_language = detect_language(self.detection_pipeline, negative_prompt, batch_size)
282
- if negative_prompt_language != "en":
283
- negative_prompt = translate_prompt(
284
- negative_prompt, self.translation_tokenizer, self.translation_model, self.device
285
- )
286
- if isinstance(negative_prompt, str):
287
- uncond_tokens = [negative_prompt]
288
- elif batch_size != len(negative_prompt):
289
- raise ValueError(
290
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
291
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
292
- " the batch size of `prompt`."
293
- )
294
- else:
295
- # detect language and translate it if necessary
296
- if isinstance(negative_prompt, list):
297
- negative_prompt_languages = detect_language(self.detection_pipeline, negative_prompt, batch_size)
298
- for index in range(batch_size):
299
- if negative_prompt_languages[index] != "en":
300
- p = translate_prompt(
301
- negative_prompt[index], self.translation_tokenizer, self.translation_model, self.device
302
- )
303
- negative_prompt[index] = p
304
- uncond_tokens = negative_prompt
305
-
306
- max_length = text_input_ids.shape[-1]
307
- uncond_input = self.tokenizer(
308
- uncond_tokens,
309
- padding="max_length",
310
- max_length=max_length,
311
- truncation=True,
312
- return_tensors="pt",
313
- )
314
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
315
-
316
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
317
- seq_len = uncond_embeddings.shape[1]
318
- uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
319
- uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
320
-
321
- # For classifier free guidance, we need to do two forward passes.
322
- # Here we concatenate the unconditional and text embeddings into a single batch
323
- # to avoid doing two forward passes
324
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
325
-
326
- # get the initial random noise unless the user supplied it
327
-
328
- # Unlike in other pipelines, latents need to be generated in the target device
329
- # for 1-to-1 results reproducibility with the CompVis implementation.
330
- # However this currently doesn't work in `mps`.
331
- latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
332
- latents_dtype = text_embeddings.dtype
333
- if latents is None:
334
- if self.device.type == "mps":
335
- # randn does not work reproducibly on mps
336
- latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
337
- self.device
338
- )
339
- else:
340
- latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
341
- else:
342
- if latents.shape != latents_shape:
343
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
344
- latents = latents.to(self.device)
345
-
346
- # set timesteps
347
- self.scheduler.set_timesteps(num_inference_steps)
348
-
349
- # Some schedulers like PNDM have timesteps as arrays
350
- # It's more optimized to move all timesteps to correct device beforehand
351
- timesteps_tensor = self.scheduler.timesteps.to(self.device)
352
-
353
- # scale the initial noise by the standard deviation required by the scheduler
354
- latents = latents * self.scheduler.init_noise_sigma
355
-
356
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
357
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
358
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
359
- # and should be between [0, 1]
360
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
361
- extra_step_kwargs = {}
362
- if accepts_eta:
363
- extra_step_kwargs["eta"] = eta
364
-
365
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
366
- # expand the latents if we are doing classifier free guidance
367
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
368
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
369
-
370
- # predict the noise residual
371
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
372
-
373
- # perform guidance
374
- if do_classifier_free_guidance:
375
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
376
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
377
-
378
- # compute the previous noisy sample x_t -> x_t-1
379
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
380
-
381
- # call the callback, if provided
382
- if callback is not None and i % callback_steps == 0:
383
- step_idx = i // getattr(self.scheduler, "order", 1)
384
- callback(step_idx, t, latents)
385
-
386
- latents = 1 / 0.18215 * latents
387
- image = self.vae.decode(latents).sample
388
-
389
- image = (image / 2 + 0.5).clamp(0, 1)
390
-
391
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
392
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
393
-
394
- if self.safety_checker is not None:
395
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
396
- self.device
397
- )
398
- image, has_nsfw_concept = self.safety_checker(
399
- images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
400
- )
401
- else:
402
- has_nsfw_concept = None
403
-
404
- if output_type == "pil":
405
- image = self.numpy_to_pil(image)
406
-
407
- if not return_dict:
408
- return (image, has_nsfw_concept)
409
-
410
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
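The `__call__` above follows the standard Stable Diffusion recipe: translate the prompt to English if needed, encode it, run the scheduler loop with classifier-free guidance, then decode with the VAE and run the safety checker. As a minimal, self-contained sketch of the guidance step alone (the tensor shape below is an illustrative stand-in for `unet(latent_model_input, t, ...).sample`, not a value taken from this pipeline):

```py
import torch

guidance_scale = 7.5
# Stand-in for the UNet output on a doubled batch: [unconditional, text-conditioned]
noise_pred = torch.randn(2, 4, 64, 64)

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# With guidance_scale = 1.0 this reduces to the text-conditioned prediction alone,
# which is why the pipeline only enables guidance when guidance_scale > 1.
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])
```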
 
one_step_unet.py DELETED
@@ -1,24 +0,0 @@
1
- #!/usr/bin/env python3
2
- import torch
3
-
4
- from diffusers import DiffusionPipeline
5
-
6
-
7
- class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
8
- def __init__(self, unet, scheduler):
9
- super().__init__()
10
-
11
- self.register_modules(unet=unet, scheduler=scheduler)
12
-
13
- def __call__(self):
14
- image = torch.randn(
15
- (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
16
- )
17
- timestep = 1
18
-
19
- model_output = self.unet(image, timestep).sample
20
- scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
21
-
22
- result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
23
-
24
- return result
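Because `UnetSchedulerOneForwardPipeline` only wires one UNet forward pass into one scheduler step (and then returns an all-ones tensor of the same shape), it can be exercised end-to-end on CPU. A minimal sketch, assuming the class above is in scope and using the `google/ddpm-cat-256` checkpoint purely for illustration:

```py
import torch
from diffusers import DDPMScheduler, UNet2DModel

# Any unconditional UNet2DModel/scheduler pair should work the same way;
# this checkpoint is an assumption, not something referenced by the file above.
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
unet = UNet2DModel.from_pretrained("google/ddpm-cat-256")

pipe = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)

with torch.no_grad():
    out = pipe()  # single forward pass + single scheduler step

print(out.shape)  # (1, 3, 256, 256) for this checkpoint; values are all ones
```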
 
pipeline_animatediff_controlnet.py DELETED
@@ -1,1125 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
-
18
- import numpy as np
19
- import torch
20
- import torch.nn.functional as F
21
- from PIL import Image
22
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
23
-
24
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
25
- from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
26
- from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel, UNetMotionModel
27
- from diffusers.models.lora import adjust_lora_scale_text_encoder
28
- from diffusers.models.unets.unet_motion_model import MotionAdapter
29
- from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput
30
- from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
31
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
32
- from diffusers.schedulers import (
33
- DDIMScheduler,
34
- DPMSolverMultistepScheduler,
35
- EulerAncestralDiscreteScheduler,
36
- EulerDiscreteScheduler,
37
- LMSDiscreteScheduler,
38
- PNDMScheduler,
39
- )
40
- from diffusers.utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
41
- from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
42
-
43
-
44
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
-
46
- EXAMPLE_DOC_STRING = """
47
- Examples:
48
- ```py
49
- >>> import torch
50
- >>> from diffusers import AutoencoderKL, ControlNetModel, MotionAdapter
51
- >>> from diffusers.pipelines import DiffusionPipeline
52
- >>> from diffusers.schedulers import DPMSolverMultistepScheduler
53
- >>> from PIL import Image
54
-
55
- >>> motion_id = "guoyww/animatediff-motion-adapter-v1-5-2"
56
- >>> adapter = MotionAdapter.from_pretrained(motion_id)
57
- >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16)
58
- >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
59
-
60
- >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
61
- >>> pipe = DiffusionPipeline.from_pretrained(
62
- ... model_id,
63
- ... motion_adapter=adapter,
64
- ... controlnet=controlnet,
65
- ... vae=vae,
66
- ... custom_pipeline="pipeline_animatediff_controlnet",
67
- ... ).to(device="cuda", dtype=torch.float16)
68
- >>> pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained(
69
- ... model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1, beta_schedule="linear",
70
- ... )
71
- >>> pipe.enable_vae_slicing()
72
-
73
- >>> conditioning_frames = []
74
- >>> for i in range(1, 16 + 1):
75
- ... conditioning_frames.append(Image.open(f"frame_{i}.png"))
76
-
77
- >>> prompt = "astronaut in space, dancing"
78
- >>> negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"
79
- >>> result = pipe(
80
- ... prompt=prompt,
81
- ... negative_prompt=negative_prompt,
82
- ... width=512,
83
- ... height=768,
84
- ... conditioning_frames=conditioning_frames,
85
- ... num_inference_steps=12,
86
- ... )
87
-
88
- >>> from diffusers.utils import export_to_gif
89
- >>> export_to_gif(result.frames[0], "result.gif")
90
- ```
91
- """
92
-
93
-
94
- # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
95
- def tensor2vid(video: torch.Tensor, processor, output_type="np"):
96
- batch_size, channels, num_frames, height, width = video.shape
97
- outputs = []
98
- for batch_idx in range(batch_size):
99
- batch_vid = video[batch_idx].permute(1, 0, 2, 3)
100
- batch_output = processor.postprocess(batch_vid, output_type)
101
-
102
- outputs.append(batch_output)
103
-
104
- if output_type == "np":
105
- outputs = np.stack(outputs)
106
-
107
- elif output_type == "pt":
108
- outputs = torch.stack(outputs)
109
-
110
- elif output_type != "pil":
111
- raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
112
-
113
- return outputs
114
-
115
-
116
- class AnimateDiffControlNetPipeline(
117
- DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin
118
- ):
119
- r"""
120
- Pipeline for text-to-video generation.
121
-
122
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
123
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
124
-
125
- The pipeline also inherits the following loading methods:
126
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
127
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
128
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
129
- - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
130
-
131
- Args:
132
- vae ([`AutoencoderKL`]):
133
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
134
- text_encoder ([`CLIPTextModel`]):
135
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
136
- tokenizer (`CLIPTokenizer`):
137
- A [`~transformers.CLIPTokenizer`] to tokenize text.
138
- unet ([`UNet2DConditionModel`]):
139
- A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
140
- motion_adapter ([`MotionAdapter`]):
141
- A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
142
- scheduler ([`SchedulerMixin`]):
143
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
144
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
145
- """
146
-
147
- model_cpu_offload_seq = "text_encoder->unet->vae"
148
- _optional_components = ["feature_extractor", "image_encoder"]
149
- _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
150
-
151
- def __init__(
152
- self,
153
- vae: AutoencoderKL,
154
- text_encoder: CLIPTextModel,
155
- tokenizer: CLIPTokenizer,
156
- unet: UNet2DConditionModel,
157
- motion_adapter: MotionAdapter,
158
- controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
159
- scheduler: Union[
160
- DDIMScheduler,
161
- PNDMScheduler,
162
- LMSDiscreteScheduler,
163
- EulerDiscreteScheduler,
164
- EulerAncestralDiscreteScheduler,
165
- DPMSolverMultistepScheduler,
166
- ],
167
- feature_extractor: Optional[CLIPImageProcessor] = None,
168
- image_encoder: Optional[CLIPVisionModelWithProjection] = None,
169
- ):
170
- super().__init__()
171
- unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
172
-
173
- if isinstance(controlnet, (list, tuple)):
174
- controlnet = MultiControlNetModel(controlnet)
175
-
176
- self.register_modules(
177
- vae=vae,
178
- text_encoder=text_encoder,
179
- tokenizer=tokenizer,
180
- unet=unet,
181
- motion_adapter=motion_adapter,
182
- controlnet=controlnet,
183
- scheduler=scheduler,
184
- feature_extractor=feature_extractor,
185
- image_encoder=image_encoder,
186
- )
187
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
188
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
189
- self.control_image_processor = VaeImageProcessor(
190
- vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
191
- )
192
-
193
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
194
- def encode_prompt(
195
- self,
196
- prompt,
197
- device,
198
- num_images_per_prompt,
199
- do_classifier_free_guidance,
200
- negative_prompt=None,
201
- prompt_embeds: Optional[torch.Tensor] = None,
202
- negative_prompt_embeds: Optional[torch.Tensor] = None,
203
- lora_scale: Optional[float] = None,
204
- clip_skip: Optional[int] = None,
205
- ):
206
- r"""
207
- Encodes the prompt into text encoder hidden states.
208
-
209
- Args:
210
- prompt (`str` or `List[str]`, *optional*):
211
- prompt to be encoded
212
- device: (`torch.device`):
213
- torch device
214
- num_images_per_prompt (`int`):
215
- number of images that should be generated per prompt
216
- do_classifier_free_guidance (`bool`):
217
- whether to use classifier free guidance or not
218
- negative_prompt (`str` or `List[str]`, *optional*):
219
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
220
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
221
- less than `1`).
222
- prompt_embeds (`torch.Tensor`, *optional*):
223
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
224
- provided, text embeddings will be generated from `prompt` input argument.
225
- negative_prompt_embeds (`torch.Tensor`, *optional*):
226
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
227
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
228
- argument.
229
- lora_scale (`float`, *optional*):
230
- A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
231
- clip_skip (`int`, *optional*):
232
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
233
- the output of the pre-final layer will be used for computing the prompt embeddings.
234
- """
235
- # set lora scale so that monkey patched LoRA
236
- # function of text encoder can correctly access it
237
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
238
- self._lora_scale = lora_scale
239
-
240
- # dynamically adjust the LoRA scale
241
- if not USE_PEFT_BACKEND:
242
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
243
- else:
244
- scale_lora_layers(self.text_encoder, lora_scale)
245
-
246
- if prompt is not None and isinstance(prompt, str):
247
- batch_size = 1
248
- elif prompt is not None and isinstance(prompt, list):
249
- batch_size = len(prompt)
250
- else:
251
- batch_size = prompt_embeds.shape[0]
252
-
253
- if prompt_embeds is None:
254
- # textual inversion: process multi-vector tokens if necessary
255
- if isinstance(self, TextualInversionLoaderMixin):
256
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
257
-
258
- text_inputs = self.tokenizer(
259
- prompt,
260
- padding="max_length",
261
- max_length=self.tokenizer.model_max_length,
262
- truncation=True,
263
- return_tensors="pt",
264
- )
265
- text_input_ids = text_inputs.input_ids
266
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
267
-
268
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
269
- text_input_ids, untruncated_ids
270
- ):
271
- removed_text = self.tokenizer.batch_decode(
272
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
273
- )
274
- logger.warning(
275
- "The following part of your input was truncated because CLIP can only handle sequences up to"
276
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
277
- )
278
-
279
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
280
- attention_mask = text_inputs.attention_mask.to(device)
281
- else:
282
- attention_mask = None
283
-
284
- if clip_skip is None:
285
- prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
286
- prompt_embeds = prompt_embeds[0]
287
- else:
288
- prompt_embeds = self.text_encoder(
289
- text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
290
- )
291
- # Access the `hidden_states` first, that contains a tuple of
292
- # all the hidden states from the encoder layers. Then index into
293
- # the tuple to access the hidden states from the desired layer.
294
- prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
295
- # We also need to apply the final LayerNorm here to not mess with the
296
- # representations. The `last_hidden_states` that we typically use for
297
- # obtaining the final prompt representations passes through the LayerNorm
298
- # layer.
299
- prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
300
-
301
- if self.text_encoder is not None:
302
- prompt_embeds_dtype = self.text_encoder.dtype
303
- elif self.unet is not None:
304
- prompt_embeds_dtype = self.unet.dtype
305
- else:
306
- prompt_embeds_dtype = prompt_embeds.dtype
307
-
308
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
309
-
310
- bs_embed, seq_len, _ = prompt_embeds.shape
311
- # duplicate text embeddings for each generation per prompt, using mps friendly method
312
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
313
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
314
-
315
- # get unconditional embeddings for classifier free guidance
316
- if do_classifier_free_guidance and negative_prompt_embeds is None:
317
- uncond_tokens: List[str]
318
- if negative_prompt is None:
319
- uncond_tokens = [""] * batch_size
320
- elif prompt is not None and type(prompt) is not type(negative_prompt):
321
- raise TypeError(
322
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
323
- f" {type(prompt)}."
324
- )
325
- elif isinstance(negative_prompt, str):
326
- uncond_tokens = [negative_prompt]
327
- elif batch_size != len(negative_prompt):
328
- raise ValueError(
329
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
330
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
331
- " the batch size of `prompt`."
332
- )
333
- else:
334
- uncond_tokens = negative_prompt
335
-
336
- # textual inversion: process multi-vector tokens if necessary
337
- if isinstance(self, TextualInversionLoaderMixin):
338
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
339
-
340
- max_length = prompt_embeds.shape[1]
341
- uncond_input = self.tokenizer(
342
- uncond_tokens,
343
- padding="max_length",
344
- max_length=max_length,
345
- truncation=True,
346
- return_tensors="pt",
347
- )
348
-
349
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
350
- attention_mask = uncond_input.attention_mask.to(device)
351
- else:
352
- attention_mask = None
353
-
354
- negative_prompt_embeds = self.text_encoder(
355
- uncond_input.input_ids.to(device),
356
- attention_mask=attention_mask,
357
- )
358
- negative_prompt_embeds = negative_prompt_embeds[0]
359
-
360
- if do_classifier_free_guidance:
361
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
362
- seq_len = negative_prompt_embeds.shape[1]
363
-
364
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
365
-
366
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
367
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
368
-
369
- if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
370
- # Retrieve the original scale by scaling back the LoRA layers
371
- unscale_lora_layers(self.text_encoder, lora_scale)
372
-
373
- return prompt_embeds, negative_prompt_embeds
374
-
375
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
376
- def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
377
- dtype = next(self.image_encoder.parameters()).dtype
378
-
379
- if not isinstance(image, torch.Tensor):
380
- image = self.feature_extractor(image, return_tensors="pt").pixel_values
381
-
382
- image = image.to(device=device, dtype=dtype)
383
- if output_hidden_states:
384
- image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
385
- image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
386
- uncond_image_enc_hidden_states = self.image_encoder(
387
- torch.zeros_like(image), output_hidden_states=True
388
- ).hidden_states[-2]
389
- uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
390
- num_images_per_prompt, dim=0
391
- )
392
- return image_enc_hidden_states, uncond_image_enc_hidden_states
393
- else:
394
- image_embeds = self.image_encoder(image).image_embeds
395
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
396
- uncond_image_embeds = torch.zeros_like(image_embeds)
397
-
398
- return image_embeds, uncond_image_embeds
399
-
400
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
401
- def prepare_ip_adapter_image_embeds(
402
- self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt
403
- ):
404
- if ip_adapter_image_embeds is None:
405
- if not isinstance(ip_adapter_image, list):
406
- ip_adapter_image = [ip_adapter_image]
407
-
408
- if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
409
- raise ValueError(
410
- f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
411
- )
412
-
413
- image_embeds = []
414
- for single_ip_adapter_image, image_proj_layer in zip(
415
- ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
416
- ):
417
- output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
418
- single_image_embeds, single_negative_image_embeds = self.encode_image(
419
- single_ip_adapter_image, device, 1, output_hidden_state
420
- )
421
- single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
422
- single_negative_image_embeds = torch.stack(
423
- [single_negative_image_embeds] * num_images_per_prompt, dim=0
424
- )
425
-
426
- if self.do_classifier_free_guidance:
427
- single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
428
- single_image_embeds = single_image_embeds.to(device)
429
-
430
- image_embeds.append(single_image_embeds)
431
- else:
432
- image_embeds = ip_adapter_image_embeds
433
- return image_embeds
434
-
435
- # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
436
- def decode_latents(self, latents):
437
- latents = 1 / self.vae.config.scaling_factor * latents
438
-
439
- batch_size, channels, num_frames, height, width = latents.shape
440
- latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
441
-
442
- image = self.vae.decode(latents).sample
443
- video = (
444
- image[None, :]
445
- .reshape(
446
- (
447
- batch_size,
448
- num_frames,
449
- -1,
450
- )
451
- + image.shape[2:]
452
- )
453
- .permute(0, 2, 1, 3, 4)
454
- )
455
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
456
- video = video.float()
457
- return video
458
-
459
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
460
- def prepare_extra_step_kwargs(self, generator, eta):
461
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
462
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
463
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
464
- # and should be between [0, 1]
465
-
466
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
467
- extra_step_kwargs = {}
468
- if accepts_eta:
469
- extra_step_kwargs["eta"] = eta
470
-
471
- # check if the scheduler accepts generator
472
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
473
- if accepts_generator:
474
- extra_step_kwargs["generator"] = generator
475
- return extra_step_kwargs
476
-
477
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
478
- def check_inputs(
479
- self,
480
- prompt,
481
- height,
482
- width,
483
- num_frames,
484
- callback_steps,
485
- negative_prompt=None,
486
- prompt_embeds=None,
487
- negative_prompt_embeds=None,
488
- callback_on_step_end_tensor_inputs=None,
489
- image=None,
490
- controlnet_conditioning_scale=1.0,
491
- control_guidance_start=0.0,
492
- control_guidance_end=1.0,
493
- ):
494
- if height % 8 != 0 or width % 8 != 0:
495
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
496
-
497
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
498
- raise ValueError(
499
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
500
- f" {type(callback_steps)}."
501
- )
502
- if callback_on_step_end_tensor_inputs is not None and not all(
503
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
504
- ):
505
- raise ValueError(
506
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
507
- )
508
-
509
- if prompt is not None and prompt_embeds is not None:
510
- raise ValueError(
511
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
512
- " only forward one of the two."
513
- )
514
- elif prompt is None and prompt_embeds is None:
515
- raise ValueError(
516
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
517
- )
518
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
519
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
520
-
521
- if negative_prompt is not None and negative_prompt_embeds is not None:
522
- raise ValueError(
523
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
524
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
525
- )
526
-
527
- if prompt_embeds is not None and negative_prompt_embeds is not None:
528
- if prompt_embeds.shape != negative_prompt_embeds.shape:
529
- raise ValueError(
530
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
531
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
532
- f" {negative_prompt_embeds.shape}."
533
- )
534
-
535
- # `prompt` needs more sophisticated handling when there are multiple
536
- # conditionings.
537
- if isinstance(self.controlnet, MultiControlNetModel):
538
- if isinstance(prompt, list):
539
- logger.warning(
540
- f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
541
- " prompts. The conditionings will be fixed across the prompts."
542
- )
543
-
544
- # Check `image`
545
- is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
546
- self.controlnet, torch._dynamo.eval_frame.OptimizedModule
547
- )
548
- if (
549
- isinstance(self.controlnet, ControlNetModel)
550
- or is_compiled
551
- and isinstance(self.controlnet._orig_mod, ControlNetModel)
552
- ):
553
- if not isinstance(image, list):
554
- raise TypeError(f"For single controlnet, `image` must be of type `list` but got {type(image)}")
555
- if len(image) != num_frames:
556
- raise ValueError(f"Excepted image to have length {num_frames} but got {len(image)=}")
557
- elif (
558
- isinstance(self.controlnet, MultiControlNetModel)
559
- or is_compiled
560
- and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
561
- ):
562
- if not isinstance(image, list) or not isinstance(image[0], list):
563
- raise TypeError(f"For multiple controlnets: `image` must be type list of lists but got {type(image)=}")
564
- if len(image[0]) != num_frames:
565
- raise ValueError(f"Expected length of image sublist as {num_frames} but got {len(image[0])=}")
566
- if any(len(img) != len(image[0]) for img in image):
567
- raise ValueError("All conditioning frame batches for multicontrolnet must be same size")
568
- else:
569
- assert False
570
-
571
- # Check `controlnet_conditioning_scale`
572
- if (
573
- isinstance(self.controlnet, ControlNetModel)
574
- or is_compiled
575
- and isinstance(self.controlnet._orig_mod, ControlNetModel)
576
- ):
577
- if not isinstance(controlnet_conditioning_scale, float):
578
- raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
579
- elif (
580
- isinstance(self.controlnet, MultiControlNetModel)
581
- or is_compiled
582
- and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
583
- ):
584
- if isinstance(controlnet_conditioning_scale, list):
585
- if any(isinstance(i, list) for i in controlnet_conditioning_scale):
586
- raise ValueError("A single batch of multiple conditionings are supported at the moment.")
587
- elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
588
- self.controlnet.nets
589
- ):
590
- raise ValueError(
591
- "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
592
- " the same length as the number of controlnets"
593
- )
594
- else:
595
- assert False
596
-
597
- if not isinstance(control_guidance_start, (tuple, list)):
598
- control_guidance_start = [control_guidance_start]
599
-
600
- if not isinstance(control_guidance_end, (tuple, list)):
601
- control_guidance_end = [control_guidance_end]
602
-
603
- if len(control_guidance_start) != len(control_guidance_end):
604
- raise ValueError(
605
- f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
606
- )
607
-
608
- if isinstance(self.controlnet, MultiControlNetModel):
609
- if len(control_guidance_start) != len(self.controlnet.nets):
610
- raise ValueError(
611
- f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
612
- )
613
-
614
- for start, end in zip(control_guidance_start, control_guidance_end):
615
- if start >= end:
616
- raise ValueError(
617
- f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
618
- )
619
- if start < 0.0:
620
- raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
621
- if end > 1.0:
622
- raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
623
-
624
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
625
- def check_image(self, image, prompt, prompt_embeds):
626
- image_is_pil = isinstance(image, Image.Image)
627
- image_is_tensor = isinstance(image, torch.Tensor)
628
- image_is_np = isinstance(image, np.ndarray)
629
- image_is_pil_list = isinstance(image, list) and isinstance(image[0], Image.Image)
630
- image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
631
- image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
632
-
633
- if (
634
- not image_is_pil
635
- and not image_is_tensor
636
- and not image_is_np
637
- and not image_is_pil_list
638
- and not image_is_tensor_list
639
- and not image_is_np_list
640
- ):
641
- raise TypeError(
642
- f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
643
- )
644
-
645
- if image_is_pil:
646
- image_batch_size = 1
647
- else:
648
- image_batch_size = len(image)
649
-
650
- if prompt is not None and isinstance(prompt, str):
651
- prompt_batch_size = 1
652
- elif prompt is not None and isinstance(prompt, list):
653
- prompt_batch_size = len(prompt)
654
- elif prompt_embeds is not None:
655
- prompt_batch_size = prompt_embeds.shape[0]
656
-
657
- if image_batch_size != 1 and image_batch_size != prompt_batch_size:
658
- raise ValueError(
659
- f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
660
- )
661
-
662
- # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents
663
- def prepare_latents(
664
- self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
665
- ):
666
- shape = (
667
- batch_size,
668
- num_channels_latents,
669
- num_frames,
670
- height // self.vae_scale_factor,
671
- width // self.vae_scale_factor,
672
- )
673
- if isinstance(generator, list) and len(generator) != batch_size:
674
- raise ValueError(
675
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
676
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
677
- )
678
-
679
- if latents is None:
680
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
681
- else:
682
- latents = latents.to(device)
683
-
684
- # scale the initial noise by the standard deviation required by the scheduler
685
- latents = latents * self.scheduler.init_noise_sigma
686
- return latents
687
-
688
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
689
- def prepare_image(
690
- self,
691
- image,
692
- width,
693
- height,
694
- batch_size,
695
- num_images_per_prompt,
696
- device,
697
- dtype,
698
- do_classifier_free_guidance=False,
699
- guess_mode=False,
700
- ):
701
- image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
702
- image_batch_size = image.shape[0]
703
-
704
- if image_batch_size == 1:
705
- repeat_by = batch_size
706
- else:
707
- # image batch size is the same as prompt batch size
708
- repeat_by = num_images_per_prompt
709
-
710
- image = image.repeat_interleave(repeat_by, dim=0)
711
-
712
- image = image.to(device=device, dtype=dtype)
713
-
714
- if do_classifier_free_guidance and not guess_mode:
715
- image = torch.cat([image] * 2)
716
-
717
- return image
718
-
719
- @property
720
- def guidance_scale(self):
721
- return self._guidance_scale
722
-
723
- @property
724
- def clip_skip(self):
725
- return self._clip_skip
726
-
727
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
728
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
729
- # corresponds to doing no classifier free guidance.
730
- @property
731
- def do_classifier_free_guidance(self):
732
- return self._guidance_scale > 1
733
-
734
- @property
735
- def cross_attention_kwargs(self):
736
- return self._cross_attention_kwargs
737
-
738
- @property
739
- def num_timesteps(self):
740
- return self._num_timesteps
741
-
742
- @torch.no_grad()
743
- def __call__(
744
- self,
745
- prompt: Union[str, List[str]] = None,
746
- num_frames: Optional[int] = 16,
747
- height: Optional[int] = None,
748
- width: Optional[int] = None,
749
- num_inference_steps: int = 50,
750
- guidance_scale: float = 7.5,
751
- negative_prompt: Optional[Union[str, List[str]]] = None,
752
- num_videos_per_prompt: Optional[int] = 1,
753
- eta: float = 0.0,
754
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
755
- latents: Optional[torch.Tensor] = None,
756
- prompt_embeds: Optional[torch.Tensor] = None,
757
- negative_prompt_embeds: Optional[torch.Tensor] = None,
758
- ip_adapter_image: Optional[PipelineImageInput] = None,
759
- ip_adapter_image_embeds: Optional[PipelineImageInput] = None,
760
- conditioning_frames: Optional[List[PipelineImageInput]] = None,
761
- output_type: Optional[str] = "pil",
762
- return_dict: bool = True,
763
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
764
- controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
765
- guess_mode: bool = False,
766
- control_guidance_start: Union[float, List[float]] = 0.0,
767
- control_guidance_end: Union[float, List[float]] = 1.0,
768
- clip_skip: Optional[int] = None,
769
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
770
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
771
- **kwargs,
772
- ):
773
- r"""
774
- The call function to the pipeline for generation.
775
-
776
- Args:
777
- prompt (`str` or `List[str]`, *optional*):
778
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
779
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
780
- The height in pixels of the generated video.
781
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
782
- The width in pixels of the generated video.
783
- num_frames (`int`, *optional*, defaults to 16):
784
- The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
785
- amounts to 2 seconds of video.
786
- num_inference_steps (`int`, *optional*, defaults to 50):
787
- The number of denoising steps. More denoising steps usually lead to higher quality videos at the
788
- expense of slower inference.
789
- guidance_scale (`float`, *optional*, defaults to 7.5):
790
- A higher guidance scale value encourages the model to generate images closely linked to the text
791
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
792
- negative_prompt (`str` or `List[str]`, *optional*):
793
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
794
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
795
- eta (`float`, *optional*, defaults to 0.0):
796
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
797
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
798
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
799
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
800
- generation deterministic.
801
- latents (`torch.Tensor`, *optional*):
802
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
803
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
804
- tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
805
- `(batch_size, num_channel, num_frames, height, width)`.
806
- prompt_embeds (`torch.Tensor`, *optional*):
807
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
808
- provided, text embeddings are generated from the `prompt` input argument.
809
- negative_prompt_embeds (`torch.Tensor`, *optional*):
810
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
811
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
812
- ip_adapter_image (`PipelineImageInput`, *optional*):
813
- Optional image input to work with IP Adapters.
814
- ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
815
- Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number of IP Adapters.
816
- Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
817
- if `do_classifier_free_guidance` is set to `True`.
818
- If not provided, embeddings are computed from the `ip_adapter_image` input argument.
819
- conditioning_frames (`List[PipelineImageInput]`, *optional*):
820
- The ControlNet input condition to provide guidance to the `unet` for generation. If multiple ControlNets
821
- are specified, images must be passed as a list such that each element of the list can be correctly
822
- batched for input to a single ControlNet.
823
- output_type (`str`, *optional*, defaults to `"pil"`):
824
- The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or
825
- `np.array`.
826
- return_dict (`bool`, *optional*, defaults to `True`):
827
- Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
828
- of a plain tuple.
829
- cross_attention_kwargs (`dict`, *optional*):
830
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
831
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
832
- controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
833
- The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
834
- to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
835
- the corresponding scale as a list.
836
- guess_mode (`bool`, *optional*, defaults to `False`):
837
- The ControlNet encoder tries to recognize the content of the input image even if you remove all
838
- prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
839
- control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
840
- The percentage of total steps at which the ControlNet starts applying.
841
- control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
842
- The percentage of total steps at which the ControlNet stops applying.
843
- clip_skip (`int`, *optional*):
844
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
845
- the output of the pre-final layer will be used for computing the prompt embeddings.
846
- callback_on_step_end (`Callable`, *optional*):
847
- A function that is called at the end of each denoising step during inference. The function is called
848
- with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
849
- callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
850
- `callback_on_step_end_tensor_inputs`.
851
- callback_on_step_end_tensor_inputs (`List`, *optional*):
852
- The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
853
- will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
854
- `._callback_tensor_inputs` attribute of your pipeline class.
855
-
856
- Examples:
857
-
858
- Returns:
859
- [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
860
- If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
861
- returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
862
- """
863
-
864
- callback = kwargs.pop("callback", None)
865
- callback_steps = kwargs.pop("callback_steps", None)
866
-
867
- if callback is not None:
868
- deprecate(
869
- "callback",
870
- "1.0.0",
871
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
872
- )
873
- if callback_steps is not None:
874
- deprecate(
875
- "callback_steps",
876
- "1.0.0",
877
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
878
- )
879
-
880
- controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
881
-
882
- # align format for control guidance
883
- if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
884
- control_guidance_start = len(control_guidance_end) * [control_guidance_start]
885
- elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
886
- control_guidance_end = len(control_guidance_start) * [control_guidance_end]
887
- elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
888
- mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
889
- control_guidance_start, control_guidance_end = (
890
- mult * [control_guidance_start],
891
- mult * [control_guidance_end],
892
- )
893
-
894
- # 0. Default height and width to unet
895
- height = height or self.unet.config.sample_size * self.vae_scale_factor
896
- width = width or self.unet.config.sample_size * self.vae_scale_factor
897
-
898
- num_videos_per_prompt = 1
899
-
900
- # 1. Check inputs. Raise error if not correct
901
- self.check_inputs(
902
- prompt=prompt,
903
- height=height,
904
- width=width,
905
- num_frames=num_frames,
906
- callback_steps=callback_steps,
907
- negative_prompt=negative_prompt,
908
- callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
909
- prompt_embeds=prompt_embeds,
910
- negative_prompt_embeds=negative_prompt_embeds,
911
- image=conditioning_frames,
912
- controlnet_conditioning_scale=controlnet_conditioning_scale,
913
- control_guidance_start=control_guidance_start,
914
- control_guidance_end=control_guidance_end,
915
- )
916
-
917
- self._guidance_scale = guidance_scale
918
- self._clip_skip = clip_skip
919
- self._cross_attention_kwargs = cross_attention_kwargs
920
-
921
- # 2. Define call parameters
922
- if prompt is not None and isinstance(prompt, str):
923
- batch_size = 1
924
- elif prompt is not None and isinstance(prompt, list):
925
- batch_size = len(prompt)
926
- else:
927
- batch_size = prompt_embeds.shape[0]
928
-
929
- device = self._execution_device
930
-
931
- if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
932
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
933
-
934
- global_pool_conditions = (
935
- controlnet.config.global_pool_conditions
936
- if isinstance(controlnet, ControlNetModel)
937
- else controlnet.nets[0].config.global_pool_conditions
938
- )
939
- guess_mode = guess_mode or global_pool_conditions
940
-
941
- # 3. Encode input prompt
942
- text_encoder_lora_scale = (
943
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
944
- )
945
- prompt_embeds, negative_prompt_embeds = self.encode_prompt(
946
- prompt,
947
- device,
948
- num_videos_per_prompt,
949
- self.do_classifier_free_guidance,
950
- negative_prompt,
951
- prompt_embeds=prompt_embeds,
952
- negative_prompt_embeds=negative_prompt_embeds,
953
- lora_scale=text_encoder_lora_scale,
954
- clip_skip=self.clip_skip,
955
- )
956
- # For classifier free guidance, we need to do two forward passes.
957
- # Here we concatenate the unconditional and text embeddings into a single batch
958
- # to avoid doing two forward passes
959
- if self.do_classifier_free_guidance:
960
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
961
-
962
- if ip_adapter_image is not None:
963
- image_embeds = self.prepare_ip_adapter_image_embeds(
964
- ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt
965
- )
966
-
967
- if isinstance(controlnet, ControlNetModel):
968
- conditioning_frames = self.prepare_image(
969
- image=conditioning_frames,
970
- width=width,
971
- height=height,
972
- batch_size=batch_size * num_videos_per_prompt * num_frames,
973
- num_images_per_prompt=num_videos_per_prompt,
974
- device=device,
975
- dtype=controlnet.dtype,
976
- do_classifier_free_guidance=self.do_classifier_free_guidance,
977
- guess_mode=guess_mode,
978
- )
979
- elif isinstance(controlnet, MultiControlNetModel):
980
- cond_prepared_frames = []
981
- for frame_ in conditioning_frames:
982
- prepared_frame = self.prepare_image(
983
- image=frame_,
984
- width=width,
985
- height=height,
986
- batch_size=batch_size * num_videos_per_prompt * num_frames,
987
- num_images_per_prompt=num_videos_per_prompt,
988
- device=device,
989
- dtype=controlnet.dtype,
990
- do_classifier_free_guidance=self.do_classifier_free_guidance,
991
- guess_mode=guess_mode,
992
- )
993
- cond_prepared_frames.append(prepared_frame)
994
- conditioning_frames = cond_prepared_frames
995
- else:
996
- assert False
997
-
998
- # 4. Prepare timesteps
999
- self.scheduler.set_timesteps(num_inference_steps, device=device)
1000
- timesteps = self.scheduler.timesteps
1001
- self._num_timesteps = len(timesteps)
1002
-
1003
- # 5. Prepare latent variables
1004
- num_channels_latents = self.unet.config.in_channels
1005
- latents = self.prepare_latents(
1006
- batch_size * num_videos_per_prompt,
1007
- num_channels_latents,
1008
- num_frames,
1009
- height,
1010
- width,
1011
- prompt_embeds.dtype,
1012
- device,
1013
- generator,
1014
- latents,
1015
- )
1016
-
1017
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1018
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1019
-
1020
- # 7. Add image embeds for IP-Adapter
1021
- added_cond_kwargs = (
1022
- {"image_embeds": image_embeds}
1023
- if ip_adapter_image is not None or ip_adapter_image_embeds is not None
1024
- else None
1025
- )
1026
-
1027
- # 7.1 Create tensor stating which controlnets to keep
1028
- controlnet_keep = []
1029
- for i in range(len(timesteps)):
1030
- keeps = [
1031
- 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1032
- for s, e in zip(control_guidance_start, control_guidance_end)
1033
- ]
1034
- controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1035
-
1036
- # 8. Denoising loop
1037
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1038
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1039
- for i, t in enumerate(timesteps):
1040
- # expand the latents if we are doing classifier free guidance
1041
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1042
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1043
-
1044
- if guess_mode and self.do_classifier_free_guidance:
1045
- # Infer ControlNet only for the conditional batch.
1046
- control_model_input = latents
1047
- control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1048
- controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1049
- else:
1050
- control_model_input = latent_model_input
1051
- controlnet_prompt_embeds = prompt_embeds
1052
- controlnet_prompt_embeds = controlnet_prompt_embeds.repeat_interleave(num_frames, dim=0)
1053
-
1054
- if isinstance(controlnet_keep[i], list):
1055
- cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1056
- else:
1057
- controlnet_cond_scale = controlnet_conditioning_scale
1058
- if isinstance(controlnet_cond_scale, list):
1059
- controlnet_cond_scale = controlnet_cond_scale[0]
1060
- cond_scale = controlnet_cond_scale * controlnet_keep[i]
1061
-
1062
- control_model_input = torch.transpose(control_model_input, 1, 2)
1063
- control_model_input = control_model_input.reshape(
1064
- (-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4])
1065
- )
1066
-
1067
- down_block_res_samples, mid_block_res_sample = self.controlnet(
1068
- control_model_input,
1069
- t,
1070
- encoder_hidden_states=controlnet_prompt_embeds,
1071
- controlnet_cond=conditioning_frames,
1072
- conditioning_scale=cond_scale,
1073
- guess_mode=guess_mode,
1074
- return_dict=False,
1075
- )
1076
-
1077
- # predict the noise residual
1078
- noise_pred = self.unet(
1079
- latent_model_input,
1080
- t,
1081
- encoder_hidden_states=prompt_embeds,
1082
- cross_attention_kwargs=self.cross_attention_kwargs,
1083
- added_cond_kwargs=added_cond_kwargs,
1084
- down_block_additional_residuals=down_block_res_samples,
1085
- mid_block_additional_residual=mid_block_res_sample,
1086
- ).sample
1087
-
1088
- # perform guidance
1089
- if self.do_classifier_free_guidance:
1090
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1091
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1092
-
1093
- # compute the previous noisy sample x_t -> x_t-1
1094
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1095
-
1096
- if callback_on_step_end is not None:
1097
- callback_kwargs = {}
1098
- for k in callback_on_step_end_tensor_inputs:
1099
- callback_kwargs[k] = locals()[k]
1100
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1101
-
1102
- latents = callback_outputs.pop("latents", latents)
1103
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1104
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1105
-
1106
- # call the callback, if provided
1107
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1108
- progress_bar.update()
1109
- if callback is not None and i % callback_steps == 0:
1110
- callback(i, t, latents)
1111
-
1112
- # 9. Post processing
1113
- if output_type == "latent":
1114
- video = latents
1115
- else:
1116
- video_tensor = self.decode_latents(latents)
1117
- video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
1118
-
1119
- # 10. Offload all models
1120
- self.maybe_free_model_hooks()
1121
-
1122
- if not return_dict:
1123
- return (video,)
1124
-
1125
- return AnimateDiffPipelineOutput(frames=video)
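The loop above is the tail end of the AnimateDiff + ControlNet community pipeline removed in this commit. For reference, a minimal usage sketch of how a community pipeline like this one is typically loaded and driven; the checkpoint IDs, the `custom_pipeline` name, and the pose-map file names are illustrative assumptions rather than values taken from the deleted file:

```py
import torch
from diffusers import ControlNetModel, DiffusionPipeline, MotionAdapter
from diffusers.utils import export_to_gif, load_image

# Illustrative checkpoints; substitute whichever SD 1.5 base model, motion
# adapter and ControlNet you actually use.
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16
)

pipe = DiffusionPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE",
    motion_adapter=adapter,
    controlnet=controlnet,
    custom_pipeline="pipeline_animatediff_controlnet",  # assumed community pipeline name
    torch_dtype=torch.float16,
).to("cuda")

# One conditioning image per generated frame (hypothetical pose maps).
conditioning_frames = [load_image(f"pose_{i}.png") for i in range(16)]

output = pipe(
    prompt="a person dancing in the rain",
    num_frames=16,
    conditioning_frames=conditioning_frames,
    controlnet_conditioning_scale=1.0,
)
export_to_gif(output.frames[0], "animation.gif")
```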
pipeline_animatediff_img2video.py DELETED
@@ -1,980 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- #
15
- # Note:
16
- # This pipeline relies on a "hack" discovered by the community that allows
17
- # the generation of videos given an input image with AnimateDiff. It works
18
- # by creating a copy of the image `num_frames` times and progressively adding
19
- # more noise to the image based on the strength and latent interpolation method.
20
-
21
- import inspect
22
- from types import FunctionType
23
- from typing import Any, Callable, Dict, List, Optional, Union
24
-
25
- import numpy as np
26
- import torch
27
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
28
-
29
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
30
- from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
31
- from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel
32
- from diffusers.models.lora import adjust_lora_scale_text_encoder
33
- from diffusers.models.unet_motion_model import MotionAdapter
34
- from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput
35
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
36
- from diffusers.schedulers import (
37
- DDIMScheduler,
38
- DPMSolverMultistepScheduler,
39
- EulerAncestralDiscreteScheduler,
40
- EulerDiscreteScheduler,
41
- LMSDiscreteScheduler,
42
- PNDMScheduler,
43
- )
44
- from diffusers.utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
45
- from diffusers.utils.torch_utils import randn_tensor
46
-
47
-
48
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
49
-
50
- EXAMPLE_DOC_STRING = """
51
- Examples:
52
- ```py
53
- >>> import torch
54
- >>> from diffusers import MotionAdapter, DiffusionPipeline, DDIMScheduler
55
- >>> from diffusers.utils import export_to_gif, load_image
56
-
57
- >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
58
- >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
59
- >>> pipe = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter, custom_pipeline="pipeline_animatediff_img2video").to("cuda")
60
-         >>> pipe.scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1)
61
-
62
- >>> image = load_image("snail.png")
63
- >>> output = pipe(image=image, prompt="A snail moving on the ground", strength=0.8, latent_interpolation_method="slerp")
64
- >>> frames = output.frames[0]
65
- >>> export_to_gif(frames, "animation.gif")
66
- ```
67
- """
68
-
69
-
70
- def lerp(
71
- v0: torch.Tensor,
72
- v1: torch.Tensor,
73
- t: Union[float, torch.Tensor],
74
- ) -> torch.Tensor:
75
- r"""
76
- Linear Interpolation between two tensors.
77
-
78
- Args:
79
- v0 (`torch.Tensor`): First tensor.
80
- v1 (`torch.Tensor`): Second tensor.
81
- t: (`float` or `torch.Tensor`): Interpolation factor.
82
- """
83
- t_is_float = False
84
- input_device = v0.device
85
- v0 = v0.cpu().numpy()
86
- v1 = v1.cpu().numpy()
87
-
88
- if isinstance(t, torch.Tensor):
89
- t = t.cpu().numpy()
90
- else:
91
- t_is_float = True
92
- t = np.array([t], dtype=v0.dtype)
93
-
94
- t = t[..., None]
95
- v0 = v0[None, ...]
96
- v1 = v1[None, ...]
97
- v2 = (1 - t) * v0 + t * v1
98
-
99
- if t_is_float and v0.ndim > 1:
100
- assert v2.shape[0] == 1
101
- v2 = np.squeeze(v2, axis=0)
102
-
103
- v2 = torch.from_numpy(v2).to(input_device)
104
- return v2
105
-
106
-
107
- def slerp(
108
- v0: torch.Tensor,
109
- v1: torch.Tensor,
110
- t: Union[float, torch.Tensor],
111
- DOT_THRESHOLD: float = 0.9995,
112
- ) -> torch.Tensor:
113
- r"""
114
- Spherical Linear Interpolation between two tensors.
115
-
116
- Args:
117
- v0 (`torch.Tensor`): First tensor.
118
- v1 (`torch.Tensor`): Second tensor.
119
- t: (`float` or `torch.Tensor`): Interpolation factor.
120
- DOT_THRESHOLD (`float`):
121
-             Dot product threshold above which linear interpolation is used instead,
122
-             because the input tensors are close to parallel.
123
- """
124
- t_is_float = False
125
- input_device = v0.device
126
- v0 = v0.cpu().numpy()
127
- v1 = v1.cpu().numpy()
128
-
129
- if isinstance(t, torch.Tensor):
130
- t = t.cpu().numpy()
131
- else:
132
- t_is_float = True
133
- t = np.array([t], dtype=v0.dtype)
134
-
135
- dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
136
-
137
- if np.abs(dot) > DOT_THRESHOLD:
138
- # v0 and v1 are close to parallel, so use linear interpolation instead
139
- v2 = lerp(v0, v1, t)
140
- else:
141
- theta_0 = np.arccos(dot)
142
- sin_theta_0 = np.sin(theta_0)
143
- theta_t = theta_0 * t
144
- sin_theta_t = np.sin(theta_t)
145
- s0 = np.sin(theta_0 - theta_t) / sin_theta_0
146
- s1 = sin_theta_t / sin_theta_0
147
- s0 = s0[..., None]
148
- s1 = s1[..., None]
149
- v0 = v0[None, ...]
150
- v1 = v1[None, ...]
151
- v2 = s0 * v0 + s1 * v1
152
-
153
- if t_is_float and v0.ndim > 1:
154
- assert v2.shape[0] == 1
155
- v2 = np.squeeze(v2, axis=0)
156
-
157
- v2 = torch.from_numpy(v2).to(input_device)
158
- return v2
159
-
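For the non-degenerate branch above, with interpolation factor `t` and angle `theta` between the two tensors treated as flat vectors, the result is `sin((1 - t) * theta) / sin(theta) * v0 + sin(t * theta) / sin(theta) * v1`; when the inputs are nearly parallel the helper falls back to plain `lerp`. A small sanity-check sketch, assuming the `slerp` helper defined above is in scope (the tensor shapes are hypothetical):

```py
import torch

# Interpolate a quarter of the way from one random latent toward another.
# Random tensors are essentially never near-parallel, so this exercises the
# spherical branch rather than the lerp fallback.
v0 = torch.randn(4, 64, 64)
v1 = torch.randn(4, 64, 64)
out = slerp(v0, v1, 0.25)
assert out.shape == v0.shape and out.device == v0.device
```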
160
-
161
- # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
162
- def tensor2vid(video: torch.Tensor, processor, output_type="np"):
163
- batch_size, channels, num_frames, height, width = video.shape
164
- outputs = []
165
- for batch_idx in range(batch_size):
166
- batch_vid = video[batch_idx].permute(1, 0, 2, 3)
167
- batch_output = processor.postprocess(batch_vid, output_type)
168
-
169
- outputs.append(batch_output)
170
-
171
- if output_type == "np":
172
- outputs = np.stack(outputs)
173
-
174
- elif output_type == "pt":
175
- outputs = torch.stack(outputs)
176
-
177
- elif not output_type == "pil":
178
- raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
179
-
180
- return outputs
181
-
182
-
183
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
184
- def retrieve_latents(
185
- encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
186
- ):
187
- if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
188
- return encoder_output.latent_dist.sample(generator)
189
- elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
190
- return encoder_output.latent_dist.mode()
191
- elif hasattr(encoder_output, "latents"):
192
- return encoder_output.latents
193
- else:
194
- raise AttributeError("Could not access latents of provided encoder_output")
195
-
196
-
197
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
198
- def retrieve_timesteps(
199
- scheduler,
200
- num_inference_steps: Optional[int] = None,
201
- device: Optional[Union[str, torch.device]] = None,
202
- timesteps: Optional[List[int]] = None,
203
- **kwargs,
204
- ):
205
- """
206
- Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
207
- custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
208
-
209
- Args:
210
- scheduler (`SchedulerMixin`):
211
- The scheduler to get timesteps from.
212
- num_inference_steps (`int`):
213
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
214
- `timesteps` must be `None`.
215
- device (`str` or `torch.device`, *optional*):
216
-             The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
217
- timesteps (`List[int]`, *optional*):
218
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
219
- timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
220
- must be `None`.
221
-
222
- Returns:
223
- `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
224
- second element is the number of inference steps.
225
- """
226
- if timesteps is not None:
227
- accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
228
- if not accepts_timesteps:
229
- raise ValueError(
230
- f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
231
- f" timestep schedules. Please check whether you are using the correct scheduler."
232
- )
233
- scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
234
- timesteps = scheduler.timesteps
235
- num_inference_steps = len(timesteps)
236
- else:
237
- scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
238
- timesteps = scheduler.timesteps
239
- return timesteps, num_inference_steps
240
-
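As a usage sketch of the helper above: a caller can pass an explicit schedule instead of a step count, which only works when the scheduler's `set_timesteps` accepts a `timesteps` keyword (otherwise a `ValueError` is raised). The schedule values and the `scheduler` argument below are hypothetical:

```py
def set_custom_schedule(scheduler, device="cuda"):
    # `scheduler` stands in for any diffusers scheduler whose `set_timesteps`
    # signature accepts `timesteps=`; the helper raises a ValueError otherwise.
    custom_schedule = [900, 700, 500, 300, 100]
    timesteps, num_inference_steps = retrieve_timesteps(
        scheduler, device=device, timesteps=custom_schedule
    )
    assert num_inference_steps == len(custom_schedule)
    return timesteps
```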
241
-
242
- class AnimateDiffImgToVideoPipeline(
243
- DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin
244
- ):
245
- r"""
246
- Pipeline for image-to-video generation.
247
-
248
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
249
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
250
-
251
- The pipeline also inherits the following loading methods:
252
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
253
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
254
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
255
- - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
256
-
257
- Args:
258
- vae ([`AutoencoderKL`]):
259
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
260
- text_encoder ([`CLIPTextModel`]):
261
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
262
- tokenizer (`CLIPTokenizer`):
263
- A [`~transformers.CLIPTokenizer`] to tokenize text.
264
- unet ([`UNet2DConditionModel`]):
265
- A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
266
- motion_adapter ([`MotionAdapter`]):
267
- A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
268
- scheduler ([`SchedulerMixin`]):
269
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
270
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
271
- """
272
-
273
- model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
274
- _optional_components = ["feature_extractor", "image_encoder"]
275
-
276
- def __init__(
277
- self,
278
- vae: AutoencoderKL,
279
- text_encoder: CLIPTextModel,
280
- tokenizer: CLIPTokenizer,
281
- unet: UNet2DConditionModel,
282
- motion_adapter: MotionAdapter,
283
- scheduler: Union[
284
- DDIMScheduler,
285
- PNDMScheduler,
286
- LMSDiscreteScheduler,
287
- EulerDiscreteScheduler,
288
- EulerAncestralDiscreteScheduler,
289
- DPMSolverMultistepScheduler,
290
- ],
291
- feature_extractor: CLIPImageProcessor = None,
292
- image_encoder: CLIPVisionModelWithProjection = None,
293
- ):
294
- super().__init__()
295
- unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
296
-
297
- self.register_modules(
298
- vae=vae,
299
- text_encoder=text_encoder,
300
- tokenizer=tokenizer,
301
- unet=unet,
302
- motion_adapter=motion_adapter,
303
- scheduler=scheduler,
304
- feature_extractor=feature_extractor,
305
- image_encoder=image_encoder,
306
- )
307
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
308
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
309
-
310
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
311
- def encode_prompt(
312
- self,
313
- prompt,
314
- device,
315
- num_images_per_prompt,
316
- do_classifier_free_guidance,
317
- negative_prompt=None,
318
- prompt_embeds: Optional[torch.Tensor] = None,
319
- negative_prompt_embeds: Optional[torch.Tensor] = None,
320
- lora_scale: Optional[float] = None,
321
- clip_skip: Optional[int] = None,
322
- ):
323
- r"""
324
- Encodes the prompt into text encoder hidden states.
325
-
326
- Args:
327
- prompt (`str` or `List[str]`, *optional*):
328
- prompt to be encoded
329
- device: (`torch.device`):
330
- torch device
331
- num_images_per_prompt (`int`):
332
- number of images that should be generated per prompt
333
- do_classifier_free_guidance (`bool`):
334
- whether to use classifier free guidance or not
335
- negative_prompt (`str` or `List[str]`, *optional*):
336
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
337
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
338
- less than `1`).
339
- prompt_embeds (`torch.Tensor`, *optional*):
340
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
341
- provided, text embeddings will be generated from `prompt` input argument.
342
- negative_prompt_embeds (`torch.Tensor`, *optional*):
343
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
344
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
345
- argument.
346
- lora_scale (`float`, *optional*):
347
- A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
348
- clip_skip (`int`, *optional*):
349
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
350
- the output of the pre-final layer will be used for computing the prompt embeddings.
351
- """
352
- # set lora scale so that monkey patched LoRA
353
- # function of text encoder can correctly access it
354
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
355
- self._lora_scale = lora_scale
356
-
357
- # dynamically adjust the LoRA scale
358
- if not USE_PEFT_BACKEND:
359
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
360
- else:
361
- scale_lora_layers(self.text_encoder, lora_scale)
362
-
363
- if prompt is not None and isinstance(prompt, str):
364
- batch_size = 1
365
- elif prompt is not None and isinstance(prompt, list):
366
- batch_size = len(prompt)
367
- else:
368
- batch_size = prompt_embeds.shape[0]
369
-
370
- if prompt_embeds is None:
371
-             # textual inversion: process multi-vector tokens if necessary
372
- if isinstance(self, TextualInversionLoaderMixin):
373
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
374
-
375
- text_inputs = self.tokenizer(
376
- prompt,
377
- padding="max_length",
378
- max_length=self.tokenizer.model_max_length,
379
- truncation=True,
380
- return_tensors="pt",
381
- )
382
- text_input_ids = text_inputs.input_ids
383
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
384
-
385
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
386
- text_input_ids, untruncated_ids
387
- ):
388
- removed_text = self.tokenizer.batch_decode(
389
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
390
- )
391
- logger.warning(
392
- "The following part of your input was truncated because CLIP can only handle sequences up to"
393
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
394
- )
395
-
396
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
397
- attention_mask = text_inputs.attention_mask.to(device)
398
- else:
399
- attention_mask = None
400
-
401
- if clip_skip is None:
402
- prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
403
- prompt_embeds = prompt_embeds[0]
404
- else:
405
- prompt_embeds = self.text_encoder(
406
- text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
407
- )
408
- # Access the `hidden_states` first, that contains a tuple of
409
- # all the hidden states from the encoder layers. Then index into
410
-                 # the tuple to access the hidden states from the desired layer.
411
- prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
412
- # We also need to apply the final LayerNorm here to not mess with the
413
- # representations. The `last_hidden_states` that we typically use for
414
-                 # obtaining the final prompt representations passes through the LayerNorm
415
-                 # layer.
416
- prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
417
-
418
- if self.text_encoder is not None:
419
- prompt_embeds_dtype = self.text_encoder.dtype
420
- elif self.unet is not None:
421
- prompt_embeds_dtype = self.unet.dtype
422
- else:
423
- prompt_embeds_dtype = prompt_embeds.dtype
424
-
425
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
426
-
427
- bs_embed, seq_len, _ = prompt_embeds.shape
428
- # duplicate text embeddings for each generation per prompt, using mps friendly method
429
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
430
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
431
-
432
- # get unconditional embeddings for classifier free guidance
433
- if do_classifier_free_guidance and negative_prompt_embeds is None:
434
- uncond_tokens: List[str]
435
- if negative_prompt is None:
436
- uncond_tokens = [""] * batch_size
437
- elif prompt is not None and type(prompt) is not type(negative_prompt):
438
- raise TypeError(
439
-                     f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
440
- f" {type(prompt)}."
441
- )
442
- elif isinstance(negative_prompt, str):
443
- uncond_tokens = [negative_prompt]
444
- elif batch_size != len(negative_prompt):
445
- raise ValueError(
446
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
447
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
448
- " the batch size of `prompt`."
449
- )
450
- else:
451
- uncond_tokens = negative_prompt
452
-
453
-             # textual inversion: process multi-vector tokens if necessary
454
- if isinstance(self, TextualInversionLoaderMixin):
455
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
456
-
457
- max_length = prompt_embeds.shape[1]
458
- uncond_input = self.tokenizer(
459
- uncond_tokens,
460
- padding="max_length",
461
- max_length=max_length,
462
- truncation=True,
463
- return_tensors="pt",
464
- )
465
-
466
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
467
- attention_mask = uncond_input.attention_mask.to(device)
468
- else:
469
- attention_mask = None
470
-
471
- negative_prompt_embeds = self.text_encoder(
472
- uncond_input.input_ids.to(device),
473
- attention_mask=attention_mask,
474
- )
475
- negative_prompt_embeds = negative_prompt_embeds[0]
476
-
477
- if do_classifier_free_guidance:
478
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
479
- seq_len = negative_prompt_embeds.shape[1]
480
-
481
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
482
-
483
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
484
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
485
-
486
- if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
487
- # Retrieve the original scale by scaling back the LoRA layers
488
- unscale_lora_layers(self.text_encoder, lora_scale)
489
-
490
- return prompt_embeds, negative_prompt_embeds
491
-
492
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
493
- def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
494
- dtype = next(self.image_encoder.parameters()).dtype
495
-
496
- if not isinstance(image, torch.Tensor):
497
- image = self.feature_extractor(image, return_tensors="pt").pixel_values
498
-
499
- image = image.to(device=device, dtype=dtype)
500
- if output_hidden_states:
501
- image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
502
- image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
503
- uncond_image_enc_hidden_states = self.image_encoder(
504
- torch.zeros_like(image), output_hidden_states=True
505
- ).hidden_states[-2]
506
- uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
507
- num_images_per_prompt, dim=0
508
- )
509
- return image_enc_hidden_states, uncond_image_enc_hidden_states
510
- else:
511
- image_embeds = self.image_encoder(image).image_embeds
512
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
513
- uncond_image_embeds = torch.zeros_like(image_embeds)
514
-
515
- return image_embeds, uncond_image_embeds
516
-
517
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
518
- def prepare_ip_adapter_image_embeds(
519
- self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt
520
- ):
521
- if ip_adapter_image_embeds is None:
522
- if not isinstance(ip_adapter_image, list):
523
- ip_adapter_image = [ip_adapter_image]
524
-
525
- if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
526
- raise ValueError(
527
- f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
528
- )
529
-
530
- image_embeds = []
531
- for single_ip_adapter_image, image_proj_layer in zip(
532
- ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
533
- ):
534
- output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
535
- single_image_embeds, single_negative_image_embeds = self.encode_image(
536
- single_ip_adapter_image, device, 1, output_hidden_state
537
- )
538
- single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
539
- single_negative_image_embeds = torch.stack(
540
- [single_negative_image_embeds] * num_images_per_prompt, dim=0
541
- )
542
-
543
- if self.do_classifier_free_guidance:
544
- single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
545
- single_image_embeds = single_image_embeds.to(device)
546
-
547
- image_embeds.append(single_image_embeds)
548
- else:
549
- image_embeds = ip_adapter_image_embeds
550
- return image_embeds
551
-
552
- # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
553
- def decode_latents(self, latents):
554
- latents = 1 / self.vae.config.scaling_factor * latents
555
-
556
- batch_size, channels, num_frames, height, width = latents.shape
557
- latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
558
-
559
- image = self.vae.decode(latents).sample
560
- video = (
561
- image[None, :]
562
- .reshape(
563
- (
564
- batch_size,
565
- num_frames,
566
- -1,
567
- )
568
- + image.shape[2:]
569
- )
570
- .permute(0, 2, 1, 3, 4)
571
- )
572
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
573
- video = video.float()
574
- return video
575
-
576
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
577
- def prepare_extra_step_kwargs(self, generator, eta):
578
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
579
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
580
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
581
- # and should be between [0, 1]
582
-
583
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
584
- extra_step_kwargs = {}
585
- if accepts_eta:
586
- extra_step_kwargs["eta"] = eta
587
-
588
- # check if the scheduler accepts generator
589
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
590
- if accepts_generator:
591
- extra_step_kwargs["generator"] = generator
592
- return extra_step_kwargs
593
-
594
- def check_inputs(
595
- self,
596
- prompt,
597
- height,
598
- width,
599
- callback_steps,
600
- negative_prompt=None,
601
- prompt_embeds=None,
602
- negative_prompt_embeds=None,
603
- callback_on_step_end_tensor_inputs=None,
604
- latent_interpolation_method=None,
605
- ):
606
- if height % 8 != 0 or width % 8 != 0:
607
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
608
-
609
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
610
- raise ValueError(
611
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
612
- f" {type(callback_steps)}."
613
- )
614
- if callback_on_step_end_tensor_inputs is not None and not all(
615
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
616
- ):
617
- raise ValueError(
618
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
619
- )
620
-
621
- if prompt is not None and prompt_embeds is not None:
622
- raise ValueError(
623
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
624
- " only forward one of the two."
625
- )
626
- elif prompt is None and prompt_embeds is None:
627
- raise ValueError(
628
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
629
- )
630
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
631
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
632
-
633
- if negative_prompt is not None and negative_prompt_embeds is not None:
634
- raise ValueError(
635
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
636
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
637
- )
638
-
639
- if prompt_embeds is not None and negative_prompt_embeds is not None:
640
- if prompt_embeds.shape != negative_prompt_embeds.shape:
641
- raise ValueError(
642
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
643
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
644
- f" {negative_prompt_embeds.shape}."
645
- )
646
-
647
- if latent_interpolation_method is not None:
648
- if latent_interpolation_method not in ["lerp", "slerp"] and not isinstance(
649
- latent_interpolation_method, FunctionType
650
- ):
651
- raise ValueError(
652
- "`latent_interpolation_method` must be one of `lerp`, `slerp` or a Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]"
653
- )
654
-
655
- def prepare_latents(
656
- self,
657
- image,
658
- strength,
659
- batch_size,
660
- num_channels_latents,
661
- num_frames,
662
- height,
663
- width,
664
- dtype,
665
- device,
666
- generator,
667
- latents=None,
668
- latent_interpolation_method="slerp",
669
- ):
670
- shape = (
671
- batch_size,
672
- num_channels_latents,
673
- num_frames,
674
- height // self.vae_scale_factor,
675
- width // self.vae_scale_factor,
676
- )
677
-
678
- if latents is None:
679
- image = image.to(device=device, dtype=dtype)
680
-
681
- if image.shape[1] == 4:
682
- latents = image
683
- else:
684
- # make sure the VAE is in float32 mode, as it overflows in float16
685
- if self.vae.config.force_upcast:
686
- image = image.float()
687
- self.vae.to(dtype=torch.float32)
688
-
689
- if isinstance(generator, list):
690
- if len(generator) != batch_size:
691
- raise ValueError(
692
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
693
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
694
- )
695
-
696
- init_latents = [
697
- retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
698
- for i in range(batch_size)
699
- ]
700
- init_latents = torch.cat(init_latents, dim=0)
701
- else:
702
- init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
703
-
704
- if self.vae.config.force_upcast:
705
- self.vae.to(dtype)
706
-
707
- init_latents = init_latents.to(dtype)
708
- init_latents = self.vae.config.scaling_factor * init_latents
709
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
710
- latents = latents * self.scheduler.init_noise_sigma
711
-
712
- if latent_interpolation_method == "lerp":
713
-
714
- def latent_cls(v0, v1, index):
715
- return lerp(v0, v1, index / num_frames * (1 - strength))
716
- elif latent_interpolation_method == "slerp":
717
-
718
- def latent_cls(v0, v1, index):
719
- return slerp(v0, v1, index / num_frames * (1 - strength))
720
- else:
721
- latent_cls = latent_interpolation_method
722
-
723
- for i in range(num_frames):
724
- latents[:, :, i, :, :] = latent_cls(latents[:, :, i, :, :], init_latents, i)
725
- else:
726
- if shape != latents.shape:
727
- # [B, C, F, H, W]
728
- raise ValueError(f"`latents` expected to have {shape=}, but found {latents.shape=}")
729
- latents = latents.to(device, dtype=dtype)
730
-
731
- return latents
732
-
733
- @torch.no_grad()
734
- def __call__(
735
- self,
736
- image: PipelineImageInput,
737
- prompt: Optional[Union[str, List[str]]] = None,
738
- height: Optional[int] = None,
739
- width: Optional[int] = None,
740
- num_frames: int = 16,
741
- num_inference_steps: int = 50,
742
- timesteps: Optional[List[int]] = None,
743
- guidance_scale: float = 7.5,
744
- strength: float = 0.8,
745
- negative_prompt: Optional[Union[str, List[str]]] = None,
746
- num_videos_per_prompt: Optional[int] = 1,
747
- eta: float = 0.0,
748
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
749
- latents: Optional[torch.Tensor] = None,
750
- prompt_embeds: Optional[torch.Tensor] = None,
751
- negative_prompt_embeds: Optional[torch.Tensor] = None,
752
- ip_adapter_image: Optional[PipelineImageInput] = None,
753
-         ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
754
- output_type: Optional[str] = "pil",
755
- return_dict: bool = True,
756
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
757
- callback_steps: Optional[int] = 1,
758
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
759
- clip_skip: Optional[int] = None,
760
- latent_interpolation_method: Union[str, Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]] = "slerp",
761
- ):
762
- r"""
763
- The call function to the pipeline for generation.
764
-
765
- Args:
766
- image (`PipelineImageInput`):
767
- The input image to condition the generation on.
768
- prompt (`str` or `List[str]`, *optional*):
769
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
770
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
771
- The height in pixels of the generated video.
772
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
773
- The width in pixels of the generated video.
774
- num_frames (`int`, *optional*, defaults to 16):
775
-                 The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
776
- amounts to 2 seconds of video.
777
- num_inference_steps (`int`, *optional*, defaults to 50):
778
-                 The number of denoising steps. More denoising steps usually lead to a higher quality video at the
779
- expense of slower inference.
780
- strength (`float`, *optional*, defaults to 0.8):
781
-                 A higher strength leads to more difference between the original image and the generated video.
782
- guidance_scale (`float`, *optional*, defaults to 7.5):
783
- A higher guidance scale value encourages the model to generate images closely linked to the text
784
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
785
- negative_prompt (`str` or `List[str]`, *optional*):
786
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
787
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
788
- eta (`float`, *optional*, defaults to 0.0):
789
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
790
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
791
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
792
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
793
- generation deterministic.
794
- latents (`torch.Tensor`, *optional*):
795
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
796
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
797
- tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
798
- `(batch_size, num_channel, num_frames, height, width)`.
799
- prompt_embeds (`torch.Tensor`, *optional*):
800
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
801
- provided, text embeddings are generated from the `prompt` input argument.
802
- negative_prompt_embeds (`torch.Tensor`, *optional*):
803
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
804
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
805
- ip_adapter_image: (`PipelineImageInput`, *optional*):
806
- Optional image input to work with IP Adapters.
807
- ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
808
-                 Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number of IP-Adapters.
809
- Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
810
- if `do_classifier_free_guidance` is set to `True`.
811
- If not provided, embeddings are computed from the `ip_adapter_image` input argument.
812
- output_type (`str`, *optional*, defaults to `"pil"`):
813
- The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or
814
- `np.array`.
815
- return_dict (`bool`, *optional*, defaults to `True`):
816
-                 Whether or not to return an [`AnimateDiffPipelineOutput`] instead
817
- of a plain tuple.
818
- callback (`Callable`, *optional*):
819
-                 A function that is called every `callback_steps` steps during inference, with the
820
- following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
821
- callback_steps (`int`, *optional*, defaults to 1):
822
- The frequency at which the `callback` function is called. If not specified, the callback is called at
823
- every step.
824
- cross_attention_kwargs (`dict`, *optional*):
825
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
826
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
827
- clip_skip (`int`, *optional*):
828
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
829
- the output of the pre-final layer will be used for computing the prompt embeddings.
830
- latent_interpolation_method (`str` or `Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]]`, *optional*):
831
- Must be one of "lerp", "slerp" or a callable that takes in a random noisy latent, image latent and a frame index
832
- as input and returns an initial latent for sampling.
833
- Examples:
834
-
835
- Returns:
836
- [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
837
- If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
838
- returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
839
- """
840
- # 0. Default height and width to unet
841
- height = height or self.unet.config.sample_size * self.vae_scale_factor
842
- width = width or self.unet.config.sample_size * self.vae_scale_factor
843
-
844
- num_videos_per_prompt = 1
845
-
846
- # 1. Check inputs. Raise error if not correct
847
- self.check_inputs(
848
- prompt=prompt,
849
- height=height,
850
- width=width,
851
- callback_steps=callback_steps,
852
- negative_prompt=negative_prompt,
853
- prompt_embeds=prompt_embeds,
854
- negative_prompt_embeds=negative_prompt_embeds,
855
- latent_interpolation_method=latent_interpolation_method,
856
- )
857
-
858
- # 2. Define call parameters
859
- if prompt is not None and isinstance(prompt, str):
860
- batch_size = 1
861
- elif prompt is not None and isinstance(prompt, list):
862
- batch_size = len(prompt)
863
- else:
864
- batch_size = prompt_embeds.shape[0]
865
-
866
- device = self._execution_device
867
-
868
-         # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
869
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
870
- # corresponds to doing no classifier free guidance.
871
- do_classifier_free_guidance = guidance_scale > 1.0
872
-
873
- # 3. Encode input prompt
874
- text_encoder_lora_scale = (
875
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
876
- )
877
- prompt_embeds, negative_prompt_embeds = self.encode_prompt(
878
- prompt,
879
- device,
880
- num_videos_per_prompt,
881
- do_classifier_free_guidance,
882
- negative_prompt,
883
- prompt_embeds=prompt_embeds,
884
- negative_prompt_embeds=negative_prompt_embeds,
885
- lora_scale=text_encoder_lora_scale,
886
- clip_skip=clip_skip,
887
- )
888
-
889
- # For classifier free guidance, we need to do two forward passes.
890
- # Here we concatenate the unconditional and text embeddings into a single batch
891
- # to avoid doing two forward passes
892
- if do_classifier_free_guidance:
893
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
894
-
895
- if ip_adapter_image is not None:
896
- image_embeds = self.prepare_ip_adapter_image_embeds(
897
- ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt
898
- )
899
-
900
- # 4. Preprocess image
901
- image = self.image_processor.preprocess(image, height=height, width=width)
902
-
903
- # 5. Prepare timesteps
904
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
905
-
906
- # 6. Prepare latent variables
907
- num_channels_latents = self.unet.config.in_channels
908
- latents = self.prepare_latents(
909
- image=image,
910
- strength=strength,
911
- batch_size=batch_size * num_videos_per_prompt,
912
- num_channels_latents=num_channels_latents,
913
- num_frames=num_frames,
914
- height=height,
915
- width=width,
916
- dtype=prompt_embeds.dtype,
917
- device=device,
918
- generator=generator,
919
- latents=latents,
920
- latent_interpolation_method=latent_interpolation_method,
921
- )
922
-
923
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
924
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
925
-
926
- # 8. Add image embeds for IP-Adapter
927
- added_cond_kwargs = (
928
- {"image_embeds": image_embeds}
929
- if ip_adapter_image is not None or ip_adapter_image_embeds is not None
930
- else None
931
- )
932
-
933
- # 9. Denoising loop
934
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
935
- with self.progress_bar(total=num_inference_steps) as progress_bar:
936
- for i, t in enumerate(timesteps):
937
- # expand the latents if we are doing classifier free guidance
938
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
939
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
940
-
941
- # predict the noise residual
942
- noise_pred = self.unet(
943
- latent_model_input,
944
- t,
945
- encoder_hidden_states=prompt_embeds,
946
- cross_attention_kwargs=cross_attention_kwargs,
947
- added_cond_kwargs=added_cond_kwargs,
948
- ).sample
949
-
950
- # perform guidance
951
- if do_classifier_free_guidance:
952
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
953
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
954
-
955
- # compute the previous noisy sample x_t -> x_t-1
956
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
957
-
958
- # call the callback, if provided
959
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
960
- progress_bar.update()
961
- if callback is not None and i % callback_steps == 0:
962
- callback(i, t, latents)
963
-
964
- if output_type == "latent":
965
- return AnimateDiffPipelineOutput(frames=latents)
966
-
967
- # 10. Post-processing
968
- if output_type == "latent":
969
- video = latents
970
- else:
971
- video_tensor = self.decode_latents(latents)
972
- video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
973
-
974
- # 11. Offload all models
975
- self.maybe_free_model_hooks()
976
-
977
- if not return_dict:
978
- return (video,)
979
-
980
- return AnimateDiffPipelineOutput(frames=video)
pipeline_demofusion_sdxl.py DELETED
@@ -1,1392 +0,0 @@
1
- import inspect
2
- import os
3
- import random
4
- import warnings
5
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
6
-
7
- import matplotlib.pyplot as plt
8
- import torch
9
- import torch.nn.functional as F
10
- from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
11
-
12
- from diffusers.image_processor import VaeImageProcessor
13
- from diffusers.loaders import (
14
- FromSingleFileMixin,
15
- LoraLoaderMixin,
16
- TextualInversionLoaderMixin,
17
- )
18
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
19
- from diffusers.models.attention_processor import (
20
- AttnProcessor2_0,
21
- LoRAAttnProcessor2_0,
22
- LoRAXFormersAttnProcessor,
23
- XFormersAttnProcessor,
24
- )
25
- from diffusers.models.lora import adjust_lora_scale_text_encoder
26
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
27
- from diffusers.schedulers import KarrasDiffusionSchedulers
28
- from diffusers.utils import (
29
- is_accelerate_available,
30
- is_accelerate_version,
31
- is_invisible_watermark_available,
32
- logging,
33
- replace_example_docstring,
34
- )
35
- from diffusers.utils.torch_utils import randn_tensor
36
-
37
-
38
- if is_invisible_watermark_available():
39
- from diffusers.pipelines.stable_diffusion_xl.watermark import (
40
- StableDiffusionXLWatermarker,
41
- )
42
-
43
-
44
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
-
46
- EXAMPLE_DOC_STRING = """
47
- Examples:
48
- ```py
49
- >>> import torch
50
- >>> from diffusers import StableDiffusionXLPipeline
51
-
52
- >>> pipe = StableDiffusionXLPipeline.from_pretrained(
53
- ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
54
- ... )
55
- >>> pipe = pipe.to("cuda")
56
-
57
- >>> prompt = "a photo of an astronaut riding a horse on mars"
58
- >>> image = pipe(prompt).images[0]
59
- ```
60
- """
61
-
62
-
63
- def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
64
- x_coord = torch.arange(kernel_size)
65
- gaussian_1d = torch.exp(-((x_coord - (kernel_size - 1) / 2) ** 2) / (2 * sigma**2))
66
- gaussian_1d = gaussian_1d / gaussian_1d.sum()
67
- gaussian_2d = gaussian_1d[:, None] * gaussian_1d[None, :]
68
- kernel = gaussian_2d[None, None, :, :].repeat(channels, 1, 1, 1)
69
-
70
- return kernel
71
-
72
-
73
- def gaussian_filter(latents, kernel_size=3, sigma=1.0):
74
- channels = latents.shape[1]
75
- kernel = gaussian_kernel(kernel_size, sigma, channels).to(latents.device, latents.dtype)
76
- blurred_latents = F.conv2d(latents, kernel, padding=kernel_size // 2, groups=channels)
77
-
78
- return blurred_latents
79
-
80
-
81
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
82
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
83
- """
84
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
85
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
86
- """
87
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
88
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
89
- # rescale the results from guidance (fixes overexposure)
90
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
91
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
92
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
93
- return noise_cfg
94
-
95
-
96
- class DemoFusionSDXLPipeline(
97
- DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
98
- ):
99
- r"""
100
- Pipeline for text-to-image generation using Stable Diffusion XL.
101
-
102
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
103
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
104
-
105
- In addition the pipeline inherits the following loading methods:
106
- - *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
107
- - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
108
-
109
- as well as the following saving methods:
110
- - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
111
-
112
- Args:
113
- vae ([`AutoencoderKL`]):
114
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
115
- text_encoder ([`CLIPTextModel`]):
116
- Frozen text-encoder. Stable Diffusion XL uses the text portion of
117
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
118
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
119
- text_encoder_2 ([` CLIPTextModelWithProjection`]):
120
- Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
121
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
122
- specifically the
123
- [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
124
- variant.
125
- tokenizer (`CLIPTokenizer`):
126
- Tokenizer of class
127
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
128
- tokenizer_2 (`CLIPTokenizer`):
129
- Second Tokenizer of class
130
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
131
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
132
- scheduler ([`SchedulerMixin`]):
133
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
134
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
135
- force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
136
- Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
137
- `stabilityai/stable-diffusion-xl-base-1.0`.
138
- add_watermarker (`bool`, *optional*):
139
- Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
140
- watermark output images. If not defined, it will default to True if the package is installed, otherwise no
141
- watermarker will be used.
142
- """
143
-
144
- model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
145
-
146
- def __init__(
147
- self,
148
- vae: AutoencoderKL,
149
- text_encoder: CLIPTextModel,
150
- text_encoder_2: CLIPTextModelWithProjection,
151
- tokenizer: CLIPTokenizer,
152
- tokenizer_2: CLIPTokenizer,
153
- unet: UNet2DConditionModel,
154
- scheduler: KarrasDiffusionSchedulers,
155
- force_zeros_for_empty_prompt: bool = True,
156
- add_watermarker: Optional[bool] = None,
157
- ):
158
- super().__init__()
159
-
160
- self.register_modules(
161
- vae=vae,
162
- text_encoder=text_encoder,
163
- text_encoder_2=text_encoder_2,
164
- tokenizer=tokenizer,
165
- tokenizer_2=tokenizer_2,
166
- unet=unet,
167
- scheduler=scheduler,
168
- )
169
- self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
170
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
171
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
172
- self.default_sample_size = self.unet.config.sample_size
173
-
174
- add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
175
-
176
- if add_watermarker:
177
- self.watermark = StableDiffusionXLWatermarker()
178
- else:
179
- self.watermark = None
180
-
181
- def encode_prompt(
182
- self,
183
- prompt: str,
184
- prompt_2: Optional[str] = None,
185
- device: Optional[torch.device] = None,
186
- num_images_per_prompt: int = 1,
187
- do_classifier_free_guidance: bool = True,
188
- negative_prompt: Optional[str] = None,
189
- negative_prompt_2: Optional[str] = None,
190
- prompt_embeds: Optional[torch.Tensor] = None,
191
- negative_prompt_embeds: Optional[torch.Tensor] = None,
192
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
193
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
194
- lora_scale: Optional[float] = None,
195
- ):
196
- r"""
197
- Encodes the prompt into text encoder hidden states.
198
-
199
- Args:
200
- prompt (`str` or `List[str]`, *optional*):
201
- prompt to be encoded
202
- prompt_2 (`str` or `List[str]`, *optional*):
203
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
204
- used in both text-encoders
205
- device: (`torch.device`):
206
- torch device
207
- num_images_per_prompt (`int`):
208
- number of images that should be generated per prompt
209
- do_classifier_free_guidance (`bool`):
210
- whether to use classifier free guidance or not
211
- negative_prompt (`str` or `List[str]`, *optional*):
212
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
213
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
214
- less than `1`).
215
- negative_prompt_2 (`str` or `List[str]`, *optional*):
216
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
217
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
218
- prompt_embeds (`torch.Tensor`, *optional*):
219
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
220
- provided, text embeddings will be generated from `prompt` input argument.
221
- negative_prompt_embeds (`torch.Tensor`, *optional*):
222
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
223
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
224
- argument.
225
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
226
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
227
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
228
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
229
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
230
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
231
- input argument.
232
- lora_scale (`float`, *optional*):
233
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
234
- """
235
- device = device or self._execution_device
236
-
237
- # set lora scale so that monkey patched LoRA
238
- # function of text encoder can correctly access it
239
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
240
- self._lora_scale = lora_scale
241
-
242
- # dynamically adjust the LoRA scale
243
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
244
- adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
245
-
246
- if prompt is not None and isinstance(prompt, str):
247
- batch_size = 1
248
- elif prompt is not None and isinstance(prompt, list):
249
- batch_size = len(prompt)
250
- else:
251
- batch_size = prompt_embeds.shape[0]
252
-
253
- # Define tokenizers and text encoders
254
- tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
255
- text_encoders = (
256
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
257
- )
258
-
259
- if prompt_embeds is None:
260
- prompt_2 = prompt_2 or prompt
261
- # textual inversion: process multi-vector tokens if necessary
262
- prompt_embeds_list = []
263
- prompts = [prompt, prompt_2]
264
- for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
265
- if isinstance(self, TextualInversionLoaderMixin):
266
- prompt = self.maybe_convert_prompt(prompt, tokenizer)
267
-
268
- text_inputs = tokenizer(
269
- prompt,
270
- padding="max_length",
271
- max_length=tokenizer.model_max_length,
272
- truncation=True,
273
- return_tensors="pt",
274
- )
275
-
276
- text_input_ids = text_inputs.input_ids
277
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
278
-
279
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
280
- text_input_ids, untruncated_ids
281
- ):
282
- removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
283
- logger.warning(
284
- "The following part of your input was truncated because CLIP can only handle sequences up to"
285
- f" {tokenizer.model_max_length} tokens: {removed_text}"
286
- )
287
-
288
- prompt_embeds = text_encoder(
289
- text_input_ids.to(device),
290
- output_hidden_states=True,
291
- )
292
-
293
- # We are only ALWAYS interested in the pooled output of the final text encoder
294
- pooled_prompt_embeds = prompt_embeds[0]
295
- prompt_embeds = prompt_embeds.hidden_states[-2]
296
-
297
- prompt_embeds_list.append(prompt_embeds)
298
-
299
- prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
300
-
301
- # get unconditional embeddings for classifier free guidance
302
- zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
303
- if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
304
- negative_prompt_embeds = torch.zeros_like(prompt_embeds)
305
- negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
306
- elif do_classifier_free_guidance and negative_prompt_embeds is None:
307
- negative_prompt = negative_prompt or ""
308
- negative_prompt_2 = negative_prompt_2 or negative_prompt
309
-
310
- uncond_tokens: List[str]
311
- if prompt is not None and type(prompt) is not type(negative_prompt):
312
- raise TypeError(
313
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
314
- f" {type(prompt)}."
315
- )
316
- elif isinstance(negative_prompt, str):
317
- uncond_tokens = [negative_prompt, negative_prompt_2]
318
- elif batch_size != len(negative_prompt):
319
- raise ValueError(
320
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
321
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
322
- " the batch size of `prompt`."
323
- )
324
- else:
325
- uncond_tokens = [negative_prompt, negative_prompt_2]
326
-
327
- negative_prompt_embeds_list = []
328
- for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
329
- if isinstance(self, TextualInversionLoaderMixin):
330
- negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
331
-
332
- max_length = prompt_embeds.shape[1]
333
- uncond_input = tokenizer(
334
- negative_prompt,
335
- padding="max_length",
336
- max_length=max_length,
337
- truncation=True,
338
- return_tensors="pt",
339
- )
340
-
341
- negative_prompt_embeds = text_encoder(
342
- uncond_input.input_ids.to(device),
343
- output_hidden_states=True,
344
- )
345
- # We are only ALWAYS interested in the pooled output of the final text encoder
346
- negative_pooled_prompt_embeds = negative_prompt_embeds[0]
347
- negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
348
-
349
- negative_prompt_embeds_list.append(negative_prompt_embeds)
350
-
351
- negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
352
-
353
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
354
- bs_embed, seq_len, _ = prompt_embeds.shape
355
- # duplicate text embeddings for each generation per prompt, using mps friendly method
356
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
357
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
358
-
359
- if do_classifier_free_guidance:
360
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
361
- seq_len = negative_prompt_embeds.shape[1]
362
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
363
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
364
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
365
-
366
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
367
- bs_embed * num_images_per_prompt, -1
368
- )
369
- if do_classifier_free_guidance:
370
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
371
- bs_embed * num_images_per_prompt, -1
372
- )
373
-
374
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
375
-
376
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
377
- def prepare_extra_step_kwargs(self, generator, eta):
378
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
379
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
380
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
381
- # and should be between [0, 1]
382
-
383
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
384
- extra_step_kwargs = {}
385
- if accepts_eta:
386
- extra_step_kwargs["eta"] = eta
387
-
388
- # check if the scheduler accepts generator
389
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
390
- if accepts_generator:
391
- extra_step_kwargs["generator"] = generator
392
- return extra_step_kwargs
393
-
394
- def check_inputs(
395
- self,
396
- prompt,
397
- prompt_2,
398
- height,
399
- width,
400
- callback_steps,
401
- negative_prompt=None,
402
- negative_prompt_2=None,
403
- prompt_embeds=None,
404
- negative_prompt_embeds=None,
405
- pooled_prompt_embeds=None,
406
- negative_pooled_prompt_embeds=None,
407
- num_images_per_prompt=None,
408
- ):
409
- if height % 8 != 0 or width % 8 != 0:
410
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
411
-
412
- if (callback_steps is None) or (
413
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
414
- ):
415
- raise ValueError(
416
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
417
- f" {type(callback_steps)}."
418
- )
419
-
420
- if prompt is not None and prompt_embeds is not None:
421
- raise ValueError(
422
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
423
- " only forward one of the two."
424
- )
425
- elif prompt_2 is not None and prompt_embeds is not None:
426
- raise ValueError(
427
- f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
428
- " only forward one of the two."
429
- )
430
- elif prompt is None and prompt_embeds is None:
431
- raise ValueError(
432
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
433
- )
434
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
435
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
436
- elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
437
- raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
438
-
439
- if negative_prompt is not None and negative_prompt_embeds is not None:
440
- raise ValueError(
441
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
442
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
443
- )
444
- elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
445
- raise ValueError(
446
- f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
447
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
448
- )
449
-
450
- if prompt_embeds is not None and negative_prompt_embeds is not None:
451
- if prompt_embeds.shape != negative_prompt_embeds.shape:
452
- raise ValueError(
453
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
454
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
455
- f" {negative_prompt_embeds.shape}."
456
- )
457
-
458
- if prompt_embeds is not None and pooled_prompt_embeds is None:
459
- raise ValueError(
460
- "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
461
- )
462
-
463
- if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
464
- raise ValueError(
465
- "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
466
- )
467
-
468
- # DemoFusion specific checks
469
- if max(height, width) % 1024 != 0:
470
- raise ValueError(
471
- f"the larger of `height` and `width` has to be divisible by 1024, but they are {height} and {width}."
472
- )
473
-
474
- if num_images_per_prompt != 1:
475
- warnings.warn("num_images_per_prompt != 1 is not supported by DemoFusion and will be ignored.")
476
- num_images_per_prompt = 1
477
-
478
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
479
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
480
- shape = (
481
- batch_size,
482
- num_channels_latents,
483
- int(height) // self.vae_scale_factor,
484
- int(width) // self.vae_scale_factor,
485
- )
486
- if isinstance(generator, list) and len(generator) != batch_size:
487
- raise ValueError(
488
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
489
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
490
- )
491
-
492
- if latents is None:
493
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
494
- else:
495
- latents = latents.to(device)
496
-
497
- # scale the initial noise by the standard deviation required by the scheduler
498
- latents = latents * self.scheduler.init_noise_sigma
499
- return latents
500
-
501
- def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
502
- add_time_ids = list(original_size + crops_coords_top_left + target_size)
503
-
504
- passed_add_embed_dim = (
505
- self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
506
- )
507
- expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
508
-
509
- if expected_add_embed_dim != passed_add_embed_dim:
510
- raise ValueError(
511
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
512
- )
513
-
514
- add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
515
- return add_time_ids
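
`_get_add_time_ids` packs SDXL's micro-conditioning (original size, crop origin, target size) into six numbers that are later embedded next to the pooled text embedding. A worked example of just the packing step; the dtype is illustrative and the dimensionality check above is skipped:

import torch

original_size = (1024, 1024)
crops_coords_top_left = (0, 0)
target_size = (1024, 1024)

add_time_ids = list(original_size + crops_coords_top_left + target_size)
# -> [1024, 1024, 0, 0, 1024, 1024]
add_time_ids = torch.tensor([add_time_ids], dtype=torch.float16)  # shape (1, 6)
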
516
-
517
- def get_views(self, height, width, window_size=128, stride=64, random_jitter=False):
518
- height //= self.vae_scale_factor
519
- width //= self.vae_scale_factor
520
- num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1
521
- num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1
522
- total_num_blocks = int(num_blocks_height * num_blocks_width)
523
- views = []
524
- for i in range(total_num_blocks):
525
- h_start = int((i // num_blocks_width) * stride)
526
- h_end = h_start + window_size
527
- w_start = int((i % num_blocks_width) * stride)
528
- w_end = w_start + window_size
529
-
530
- if h_end > height:
531
- h_start = int(h_start + height - h_end)
532
- h_end = int(height)
533
- if w_end > width:
534
- w_start = int(w_start + width - w_end)
535
- w_end = int(width)
536
- if h_start < 0:
537
- h_end = int(h_end - h_start)
538
- h_start = 0
539
- if w_start < 0:
540
- w_end = int(w_end - w_start)
541
- w_start = 0
542
-
543
- if random_jitter:
544
- jitter_range = (window_size - stride) // 4
545
- w_jitter = 0
546
- h_jitter = 0
547
- if (w_start != 0) and (w_end != width):
548
- w_jitter = random.randint(-jitter_range, jitter_range)
549
- elif (w_start == 0) and (w_end != width):
550
- w_jitter = random.randint(-jitter_range, 0)
551
- elif (w_start != 0) and (w_end == width):
552
- w_jitter = random.randint(0, jitter_range)
553
- if (h_start != 0) and (h_end != height):
554
- h_jitter = random.randint(-jitter_range, jitter_range)
555
- elif (h_start == 0) and (h_end != height):
556
- h_jitter = random.randint(-jitter_range, 0)
557
- elif (h_start != 0) and (h_end == height):
558
- h_jitter = random.randint(0, jitter_range)
559
- h_start += h_jitter + jitter_range
560
- h_end += h_jitter + jitter_range
561
- w_start += w_jitter + jitter_range
562
- w_end += w_jitter + jitter_range
563
-
564
- views.append((h_start, h_end, w_start, w_end))
565
- return views
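
`get_views` covers the latent canvas with overlapping `window_size` x `window_size` windows spaced `stride` apart, clamping the final row and column to the border (and optionally jittering each window for MultiDiffusion). A standalone sketch of the block-count arithmetic, assuming a 2048x2048 image and the usual SDXL latent scale factor of 8:

# Latent-space canvas for a 2048x2048 image with vae_scale_factor == 8.
height = width = 2048 // 8          # 256
window_size, stride = 128, 64       # defaults used by get_views

num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1
num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1
print(num_blocks_height * num_blocks_width)  # 9 overlapping 128x128 views
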
566
-
567
- def tiled_decode(self, latents, current_height, current_width):
568
- core_size = self.unet.config.sample_size // 4
569
- core_stride = core_size
570
- pad_size = self.unet.config.sample_size // 4 * 3
571
- decoder_view_batch_size = 1
572
-
573
- views = self.get_views(current_height, current_width, stride=core_stride, window_size=core_size)
574
- views_batch = [views[i : i + decoder_view_batch_size] for i in range(0, len(views), decoder_view_batch_size)]
575
- latents_ = F.pad(latents, (pad_size, pad_size, pad_size, pad_size), "constant", 0)
576
- image = torch.zeros(latents.size(0), 3, current_height, current_width).to(latents.device)
577
- count = torch.zeros_like(image).to(latents.device)
578
- # get the latents corresponding to the current view coordinates
579
- with self.progress_bar(total=len(views_batch)) as progress_bar:
580
- for j, batch_view in enumerate(views_batch):
581
- len(batch_view)
582
- latents_for_view = torch.cat(
583
- [
584
- latents_[:, :, h_start : h_end + pad_size * 2, w_start : w_end + pad_size * 2]
585
- for h_start, h_end, w_start, w_end in batch_view
586
- ]
587
- )
588
- image_patch = self.vae.decode(latents_for_view / self.vae.config.scaling_factor, return_dict=False)[0]
589
- h_start, h_end, w_start, w_end = views[j]
590
- h_start, h_end, w_start, w_end = (
591
- h_start * self.vae_scale_factor,
592
- h_end * self.vae_scale_factor,
593
- w_start * self.vae_scale_factor,
594
- w_end * self.vae_scale_factor,
595
- )
596
- p_h_start, p_h_end, p_w_start, p_w_end = (
597
- pad_size * self.vae_scale_factor,
598
- image_patch.size(2) - pad_size * self.vae_scale_factor,
599
- pad_size * self.vae_scale_factor,
600
- image_patch.size(3) - pad_size * self.vae_scale_factor,
601
- )
602
- image[:, :, h_start:h_end, w_start:w_end] += image_patch[:, :, p_h_start:p_h_end, p_w_start:p_w_end]
603
- count[:, :, h_start:h_end, w_start:w_end] += 1
604
- progress_bar.update()
605
- image = image / count
606
-
607
- return image
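
`tiled_decode` decodes overlapping latent patches and averages the overlapping pixel regions so VAE memory stays bounded at very high resolutions. The blending reduces to an accumulate-and-normalize pattern; a toy sketch of that pattern with hypothetical patch outputs, independent of the VAE:

import torch

canvas = torch.zeros(1, 3, 64, 64)
count = torch.zeros_like(canvas)

# Two hypothetical decoded patches that overlap in the middle 32 columns.
patch_a, patch_b = torch.rand(1, 3, 64, 48), torch.rand(1, 3, 64, 48)
canvas[..., :48] += patch_a
count[..., :48] += 1
canvas[..., 16:] += patch_b
count[..., 16:] += 1

blended = canvas / count  # overlapping pixels are averaged, as in tiled_decode
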
608
-
609
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
610
- def upcast_vae(self):
611
- dtype = self.vae.dtype
612
- self.vae.to(dtype=torch.float32)
613
- use_torch_2_0_or_xformers = isinstance(
614
- self.vae.decoder.mid_block.attentions[0].processor,
615
- (
616
- AttnProcessor2_0,
617
- XFormersAttnProcessor,
618
- LoRAXFormersAttnProcessor,
619
- LoRAAttnProcessor2_0,
620
- ),
621
- )
622
- # if xformers or torch_2_0 is used attention block does not need
623
- # to be in float32 which can save lots of memory
624
- if use_torch_2_0_or_xformers:
625
- self.vae.post_quant_conv.to(dtype)
626
- self.vae.decoder.conv_in.to(dtype)
627
- self.vae.decoder.mid_block.to(dtype)
628
-
629
- @torch.no_grad()
630
- @replace_example_docstring(EXAMPLE_DOC_STRING)
631
- def __call__(
632
- self,
633
- prompt: Union[str, List[str]] = None,
634
- prompt_2: Optional[Union[str, List[str]]] = None,
635
- height: Optional[int] = None,
636
- width: Optional[int] = None,
637
- num_inference_steps: int = 50,
638
- denoising_end: Optional[float] = None,
639
- guidance_scale: float = 5.0,
640
- negative_prompt: Optional[Union[str, List[str]]] = None,
641
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
642
- num_images_per_prompt: Optional[int] = 1,
643
- eta: float = 0.0,
644
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
645
- latents: Optional[torch.Tensor] = None,
646
- prompt_embeds: Optional[torch.Tensor] = None,
647
- negative_prompt_embeds: Optional[torch.Tensor] = None,
648
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
649
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
650
- output_type: Optional[str] = "pil",
651
- return_dict: bool = False,
652
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
653
- callback_steps: int = 1,
654
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
655
- guidance_rescale: float = 0.0,
656
- original_size: Optional[Tuple[int, int]] = None,
657
- crops_coords_top_left: Tuple[int, int] = (0, 0),
658
- target_size: Optional[Tuple[int, int]] = None,
659
- negative_original_size: Optional[Tuple[int, int]] = None,
660
- negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
661
- negative_target_size: Optional[Tuple[int, int]] = None,
662
- ################### DemoFusion specific parameters ####################
663
- view_batch_size: int = 16,
664
- multi_decoder: bool = True,
665
- stride: Optional[int] = 64,
666
- cosine_scale_1: Optional[float] = 3.0,
667
- cosine_scale_2: Optional[float] = 1.0,
668
- cosine_scale_3: Optional[float] = 1.0,
669
- sigma: Optional[float] = 0.8,
670
- show_image: bool = False,
671
- ):
672
- r"""
673
- Function invoked when calling the pipeline for generation.
674
-
675
- Args:
676
- prompt (`str` or `List[str]`, *optional*):
677
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
678
- instead.
679
- prompt_2 (`str` or `List[str]`, *optional*):
680
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
681
- used in both text-encoders
682
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
683
- The height in pixels of the generated image. This is set to 1024 by default for the best results.
684
- Anything below 512 pixels won't work well for
685
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
686
- and checkpoints that are not specifically fine-tuned on low resolutions.
687
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
688
- The width in pixels of the generated image. This is set to 1024 by default for the best results.
689
- Anything below 512 pixels won't work well for
690
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
691
- and checkpoints that are not specifically fine-tuned on low resolutions.
692
- num_inference_steps (`int`, *optional*, defaults to 50):
693
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
694
- expense of slower inference.
695
- denoising_end (`float`, *optional*):
696
- When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
697
- completed before it is intentionally prematurely terminated. As a result, the returned sample will
698
- still retain a substantial amount of noise as determined by the discrete timesteps selected by the
699
- scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
700
- "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
701
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
702
- guidance_scale (`float`, *optional*, defaults to 5.0):
703
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
704
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
705
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
706
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
707
- usually at the expense of lower image quality.
708
- negative_prompt (`str` or `List[str]`, *optional*):
709
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
710
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
711
- less than `1`).
712
- negative_prompt_2 (`str` or `List[str]`, *optional*):
713
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
714
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
715
- num_images_per_prompt (`int`, *optional*, defaults to 1):
716
- The number of images to generate per prompt.
717
- eta (`float`, *optional*, defaults to 0.0):
718
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
719
- [`schedulers.DDIMScheduler`], will be ignored for others.
720
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
721
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
722
- to make generation deterministic.
723
- latents (`torch.Tensor`, *optional*):
724
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
725
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
726
- tensor will be generated by sampling using the supplied random `generator`.
727
- prompt_embeds (`torch.Tensor`, *optional*):
728
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
729
- provided, text embeddings will be generated from `prompt` input argument.
730
- negative_prompt_embeds (`torch.Tensor`, *optional*):
731
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
732
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
733
- argument.
734
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
735
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
736
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
737
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
738
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
739
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
740
- input argument.
741
- output_type (`str`, *optional*, defaults to `"pil"`):
742
- The output format of the generate image. Choose between
743
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
744
- return_dict (`bool`, *optional*, defaults to `False`):
745
- Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
746
- of a plain tuple.
747
- callback (`Callable`, *optional*):
748
- A function that will be called every `callback_steps` steps during inference. The function will be
749
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
750
- callback_steps (`int`, *optional*, defaults to 1):
751
- The frequency at which the `callback` function will be called. If not specified, the callback will be
752
- called at every step.
753
- cross_attention_kwargs (`dict`, *optional*):
754
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
755
- `self.processor` in
756
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
757
- guidance_rescale (`float`, *optional*, defaults to 0.0):
758
- Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
759
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
760
- [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
761
- Guidance rescale factor should fix overexposure when using zero terminal SNR.
762
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
763
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
764
- `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
765
- explained in section 2.2 of
766
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
767
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
768
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
769
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
770
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
771
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
772
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
773
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
774
- not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
775
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
776
- negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
777
- To negatively condition the generation process based on a specific image resolution. Part of SDXL's
778
- micro-conditioning as explained in section 2.2 of
779
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
780
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
781
- negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
782
- To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
783
- micro-conditioning as explained in section 2.2 of
784
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
785
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
786
- negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
787
- To negatively condition the generation process based on a target image resolution. It should be the same
788
- as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
789
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
790
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
791
- ################### DemoFusion specific parameters ####################
792
- view_batch_size (`int`, defaults to 16):
793
- The batch size for multiple denoising paths. Typically, a larger batch size can result in higher
794
- efficiency but comes with increased GPU memory requirements.
795
- multi_decoder (`bool`, defaults to True):
796
- Determines whether to use a tiled decoder. Generally, when the resolution exceeds 3072x3072,
797
- a tiled decoder becomes necessary.
798
- stride (`int`, defaults to 64):
799
- The stride of moving local patches. A smaller stride is better for alleviating seam issues,
800
- but it also introduces additional computational overhead and inference time.
801
- cosine_scale_1 (`float`, defaults to 3):
802
- Control the strength of skip-residual. For specific impacts, please refer to Appendix C
803
- in the DemoFusion paper.
804
- cosine_scale_2 (`float`, defaults to 1):
805
- Control the strength of dilated sampling. For specific impacts, please refer to Appendix C
806
- in the DemoFusion paper.
807
- cosine_scale_3 (`float`, defaults to 1):
808
- Control the strength of the Gaussian filter. For specific impacts, please refer to Appendix C
809
- in the DemoFusion paper.
810
- sigma (`float`, defaults to 0.8):
811
- The standard deviation of the Gaussian filter.
812
- show_image (`bool`, defaults to False):
813
- Determines whether to show intermediate results during generation.
814
-
815
- Examples:
816
-
817
- Returns:
818
- a `list` with the generated images at each phase.
819
- """
820
-
821
- # 0. Default height and width to unet
822
- height = height or self.default_sample_size * self.vae_scale_factor
823
- width = width or self.default_sample_size * self.vae_scale_factor
824
-
825
- x1_size = self.default_sample_size * self.vae_scale_factor
826
-
827
- height_scale = height / x1_size
828
- width_scale = width / x1_size
829
- scale_num = int(max(height_scale, width_scale))
830
- aspect_ratio = min(height_scale, width_scale) / max(height_scale, width_scale)
831
-
832
- original_size = original_size or (height, width)
833
- target_size = target_size or (height, width)
834
-
835
- # 1. Check inputs. Raise error if not correct
836
- self.check_inputs(
837
- prompt,
838
- prompt_2,
839
- height,
840
- width,
841
- callback_steps,
842
- negative_prompt,
843
- negative_prompt_2,
844
- prompt_embeds,
845
- negative_prompt_embeds,
846
- pooled_prompt_embeds,
847
- negative_pooled_prompt_embeds,
848
- num_images_per_prompt,
849
- )
850
-
851
- # 2. Define call parameters
852
- if prompt is not None and isinstance(prompt, str):
853
- batch_size = 1
854
- elif prompt is not None and isinstance(prompt, list):
855
- batch_size = len(prompt)
856
- else:
857
- batch_size = prompt_embeds.shape[0]
858
-
859
- device = self._execution_device
860
-
861
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
862
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
863
- # corresponds to doing no classifier free guidance.
864
- do_classifier_free_guidance = guidance_scale > 1.0
865
-
866
- # 3. Encode input prompt
867
- text_encoder_lora_scale = (
868
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
869
- )
870
- (
871
- prompt_embeds,
872
- negative_prompt_embeds,
873
- pooled_prompt_embeds,
874
- negative_pooled_prompt_embeds,
875
- ) = self.encode_prompt(
876
- prompt=prompt,
877
- prompt_2=prompt_2,
878
- device=device,
879
- num_images_per_prompt=num_images_per_prompt,
880
- do_classifier_free_guidance=do_classifier_free_guidance,
881
- negative_prompt=negative_prompt,
882
- negative_prompt_2=negative_prompt_2,
883
- prompt_embeds=prompt_embeds,
884
- negative_prompt_embeds=negative_prompt_embeds,
885
- pooled_prompt_embeds=pooled_prompt_embeds,
886
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
887
- lora_scale=text_encoder_lora_scale,
888
- )
889
-
890
- # 4. Prepare timesteps
891
- self.scheduler.set_timesteps(num_inference_steps, device=device)
892
-
893
- timesteps = self.scheduler.timesteps
894
-
895
- # 5. Prepare latent variables
896
- num_channels_latents = self.unet.config.in_channels
897
- latents = self.prepare_latents(
898
- batch_size * num_images_per_prompt,
899
- num_channels_latents,
900
- height // scale_num,
901
- width // scale_num,
902
- prompt_embeds.dtype,
903
- device,
904
- generator,
905
- latents,
906
- )
907
-
908
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
909
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
910
-
911
- # 7. Prepare added time ids & embeddings
912
- add_text_embeds = pooled_prompt_embeds
913
- add_time_ids = self._get_add_time_ids(
914
- original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
915
- )
916
- if negative_original_size is not None and negative_target_size is not None:
917
- negative_add_time_ids = self._get_add_time_ids(
918
- negative_original_size,
919
- negative_crops_coords_top_left,
920
- negative_target_size,
921
- dtype=prompt_embeds.dtype,
922
- )
923
- else:
924
- negative_add_time_ids = add_time_ids
925
-
926
- if do_classifier_free_guidance:
927
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
928
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
929
- add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
930
-
931
- prompt_embeds = prompt_embeds.to(device)
932
- add_text_embeds = add_text_embeds.to(device)
933
- add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
934
-
935
- # 8. Denoising loop
936
- num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
937
-
938
- # 7.1 Apply denoising_end
939
- if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
940
- discrete_timestep_cutoff = int(
941
- round(
942
- self.scheduler.config.num_train_timesteps
943
- - (denoising_end * self.scheduler.config.num_train_timesteps)
944
- )
945
- )
946
- num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
947
- timesteps = timesteps[:num_inference_steps]
948
-
949
- output_images = []
950
-
951
- ############################################################### Phase 1 #################################################################
952
-
953
- print("### Phase 1 Denoising ###")
954
- with self.progress_bar(total=num_inference_steps) as progress_bar:
955
- for i, t in enumerate(timesteps):
956
- latents_for_view = latents
957
-
958
- # expand the latents if we are doing classifier free guidance
959
- latent_model_input = latents.repeat_interleave(2, dim=0) if do_classifier_free_guidance else latents
960
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
961
-
962
- # predict the noise residual
963
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
964
- noise_pred = self.unet(
965
- latent_model_input,
966
- t,
967
- encoder_hidden_states=prompt_embeds,
968
- cross_attention_kwargs=cross_attention_kwargs,
969
- added_cond_kwargs=added_cond_kwargs,
970
- return_dict=False,
971
- )[0]
972
-
973
- # perform guidance
974
- if do_classifier_free_guidance:
975
- noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
976
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
977
-
978
- if do_classifier_free_guidance and guidance_rescale > 0.0:
979
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
980
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
981
-
982
- # compute the previous noisy sample x_t -> x_t-1
983
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
984
-
985
- # call the callback, if provided
986
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
987
- progress_bar.update()
988
- if callback is not None and i % callback_steps == 0:
989
- step_idx = i // getattr(self.scheduler, "order", 1)
990
- callback(step_idx, t, latents)
991
-
992
- anchor_mean = latents.mean()
993
- anchor_std = latents.std()
994
- if not output_type == "latent":
995
- # make sure the VAE is in float32 mode, as it overflows in float16
996
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
997
-
998
- if needs_upcasting:
999
- self.upcast_vae()
1000
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1001
- print("### Phase 1 Decoding ###")
1002
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1003
- # cast back to fp16 if needed
1004
- if needs_upcasting:
1005
- self.vae.to(dtype=torch.float16)
1006
-
1007
- image = self.image_processor.postprocess(image, output_type=output_type)
1008
- if show_image:
1009
- plt.figure(figsize=(10, 10))
1010
- plt.imshow(image[0])
1011
- plt.axis("off") # Turn off axis numbers and ticks
1012
- plt.show()
1013
- output_images.append(image[0])
1014
-
1015
- ####################################################### Phase 2+ #####################################################
1016
-
1017
- for current_scale_num in range(2, scale_num + 1):
1018
- print("### Phase {} Denoising ###".format(current_scale_num))
1019
- current_height = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
1020
- current_width = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
1021
- if height > width:
1022
- current_width = int(current_width * aspect_ratio)
1023
- else:
1024
- current_height = int(current_height * aspect_ratio)
1025
-
1026
- latents = F.interpolate(
1027
- latents,
1028
- size=(int(current_height / self.vae_scale_factor), int(current_width / self.vae_scale_factor)),
1029
- mode="bicubic",
1030
- )
1031
-
1032
- noise_latents = []
1033
- noise = torch.randn_like(latents)
1034
- for timestep in timesteps:
1035
- noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0))
1036
- noise_latents.append(noise_latent)
1037
- latents = noise_latents[0]
1038
-
1039
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1040
- for i, t in enumerate(timesteps):
1041
- count = torch.zeros_like(latents)
1042
- value = torch.zeros_like(latents)
1043
- cosine_factor = (
1044
- 0.5
1045
- * (
1046
- 1
1047
- + torch.cos(
1048
- torch.pi
1049
- * (self.scheduler.config.num_train_timesteps - t)
1050
- / self.scheduler.config.num_train_timesteps
1051
- )
1052
- ).cpu()
1053
- )
1054
-
1055
- c1 = cosine_factor**cosine_scale_1
1056
- latents = latents * (1 - c1) + noise_latents[i] * c1
1057
-
1058
- ############################################# MultiDiffusion #############################################
1059
-
1060
- views = self.get_views(
1061
- current_height,
1062
- current_width,
1063
- stride=stride,
1064
- window_size=self.unet.config.sample_size,
1065
- random_jitter=True,
1066
- )
1067
- views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
1068
-
1069
- jitter_range = (self.unet.config.sample_size - stride) // 4
1070
- latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), "constant", 0)
1071
-
1072
- count_local = torch.zeros_like(latents_)
1073
- value_local = torch.zeros_like(latents_)
1074
-
1075
- for j, batch_view in enumerate(views_batch):
1076
- vb_size = len(batch_view)
1077
-
1078
- # get the latents corresponding to the current view coordinates
1079
- latents_for_view = torch.cat(
1080
- [
1081
- latents_[:, :, h_start:h_end, w_start:w_end]
1082
- for h_start, h_end, w_start, w_end in batch_view
1083
- ]
1084
- )
1085
-
1086
- # expand the latents if we are doing classifier free guidance
1087
- latent_model_input = latents_for_view
1088
- latent_model_input = (
1089
- latent_model_input.repeat_interleave(2, dim=0)
1090
- if do_classifier_free_guidance
1091
- else latent_model_input
1092
- )
1093
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1094
-
1095
- prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
1096
- add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
1097
- add_time_ids_input = []
1098
- for h_start, h_end, w_start, w_end in batch_view:
1099
- add_time_ids_ = add_time_ids.clone()
1100
- add_time_ids_[:, 2] = h_start * self.vae_scale_factor
1101
- add_time_ids_[:, 3] = w_start * self.vae_scale_factor
1102
- add_time_ids_input.append(add_time_ids_)
1103
- add_time_ids_input = torch.cat(add_time_ids_input)
1104
-
1105
- # predict the noise residual
1106
- added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
1107
- noise_pred = self.unet(
1108
- latent_model_input,
1109
- t,
1110
- encoder_hidden_states=prompt_embeds_input,
1111
- cross_attention_kwargs=cross_attention_kwargs,
1112
- added_cond_kwargs=added_cond_kwargs,
1113
- return_dict=False,
1114
- )[0]
1115
-
1116
- if do_classifier_free_guidance:
1117
- noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1118
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1119
-
1120
- if do_classifier_free_guidance and guidance_rescale > 0.0:
1121
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1122
- noise_pred = rescale_noise_cfg(
1123
- noise_pred, noise_pred_text, guidance_rescale=guidance_rescale
1124
- )
1125
-
1126
- # compute the previous noisy sample x_t -> x_t-1
1127
- self.scheduler._init_step_index(t)
1128
- latents_denoised_batch = self.scheduler.step(
1129
- noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False
1130
- )[0]
1131
-
1132
- # extract value from batch
1133
- for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
1134
- latents_denoised_batch.chunk(vb_size), batch_view
1135
- ):
1136
- value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
1137
- count_local[:, :, h_start:h_end, w_start:w_end] += 1
1138
-
1139
- value_local = value_local[
1140
- :,
1141
- :,
1142
- jitter_range : jitter_range + current_height // self.vae_scale_factor,
1143
- jitter_range : jitter_range + current_width // self.vae_scale_factor,
1144
- ]
1145
- count_local = count_local[
1146
- :,
1147
- :,
1148
- jitter_range : jitter_range + current_height // self.vae_scale_factor,
1149
- jitter_range : jitter_range + current_width // self.vae_scale_factor,
1150
- ]
1151
-
1152
- c2 = cosine_factor**cosine_scale_2
1153
-
1154
- value += value_local / count_local * (1 - c2)
1155
- count += torch.ones_like(value_local) * (1 - c2)
1156
-
1157
- ############################################# Dilated Sampling #############################################
1158
-
1159
- views = [[h, w] for h in range(current_scale_num) for w in range(current_scale_num)]
1160
- views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
1161
-
1162
- h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num
1163
- w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num
1164
- latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), "constant", 0)
1165
-
1166
- count_global = torch.zeros_like(latents_)
1167
- value_global = torch.zeros_like(latents_)
1168
-
1169
- c3 = 0.99 * cosine_factor**cosine_scale_3 + 1e-2
1170
- std_, mean_ = latents_.std(), latents_.mean()
1171
- latents_gaussian = gaussian_filter(
1172
- latents_, kernel_size=(2 * current_scale_num - 1), sigma=sigma * c3
1173
- )
1174
- latents_gaussian = (
1175
- latents_gaussian - latents_gaussian.mean()
1176
- ) / latents_gaussian.std() * std_ + mean_
1177
-
1178
- for j, batch_view in enumerate(views_batch):
1179
- latents_for_view = torch.cat(
1180
- [latents_[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view]
1181
- )
1182
- latents_for_view_gaussian = torch.cat(
1183
- [latents_gaussian[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view]
1184
- )
1185
-
1186
- vb_size = latents_for_view.size(0)
1187
-
1188
- # expand the latents if we are doing classifier free guidance
1189
- latent_model_input = latents_for_view_gaussian
1190
- latent_model_input = (
1191
- latent_model_input.repeat_interleave(2, dim=0)
1192
- if do_classifier_free_guidance
1193
- else latent_model_input
1194
- )
1195
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1196
-
1197
- prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
1198
- add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
1199
- add_time_ids_input = torch.cat([add_time_ids] * vb_size)
1200
-
1201
- # predict the noise residual
1202
- added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
1203
- noise_pred = self.unet(
1204
- latent_model_input,
1205
- t,
1206
- encoder_hidden_states=prompt_embeds_input,
1207
- cross_attention_kwargs=cross_attention_kwargs,
1208
- added_cond_kwargs=added_cond_kwargs,
1209
- return_dict=False,
1210
- )[0]
1211
-
1212
- if do_classifier_free_guidance:
1213
- noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1214
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1215
-
1216
- if do_classifier_free_guidance and guidance_rescale > 0.0:
1217
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1218
- noise_pred = rescale_noise_cfg(
1219
- noise_pred, noise_pred_text, guidance_rescale=guidance_rescale
1220
- )
1221
-
1222
- # compute the previous noisy sample x_t -> x_t-1
1223
- self.scheduler._init_step_index(t)
1224
- latents_denoised_batch = self.scheduler.step(
1225
- noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False
1226
- )[0]
1227
-
1228
- # extract value from batch
1229
- for latents_view_denoised, (h, w) in zip(latents_denoised_batch.chunk(vb_size), batch_view):
1230
- value_global[:, :, h::current_scale_num, w::current_scale_num] += latents_view_denoised
1231
- count_global[:, :, h::current_scale_num, w::current_scale_num] += 1
1232
-
1233
- c2 = cosine_factor**cosine_scale_2
1234
-
1235
- value_global = value_global[:, :, h_pad:, w_pad:]
1236
-
1237
- value += value_global * c2
1238
- count += torch.ones_like(value_global) * c2
1239
-
1240
- ###########################################################
1241
-
1242
- latents = torch.where(count > 0, value / count, value)
1243
-
1244
- # call the callback, if provided
1245
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1246
- progress_bar.update()
1247
- if callback is not None and i % callback_steps == 0:
1248
- step_idx = i // getattr(self.scheduler, "order", 1)
1249
- callback(step_idx, t, latents)
1250
-
1251
- #########################################################################################################################################
1252
-
1253
- latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean
1254
- if not output_type == "latent":
1255
- # make sure the VAE is in float32 mode, as it overflows in float16
1256
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1257
-
1258
- if needs_upcasting:
1259
- self.upcast_vae()
1260
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1261
-
1262
- print("### Phase {} Decoding ###".format(current_scale_num))
1263
- if multi_decoder:
1264
- image = self.tiled_decode(latents, current_height, current_width)
1265
- else:
1266
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1267
-
1268
- # cast back to fp16 if needed
1269
- if needs_upcasting:
1270
- self.vae.to(dtype=torch.float16)
1271
- else:
1272
- image = latents
1273
-
1274
- if not output_type == "latent":
1275
- image = self.image_processor.postprocess(image, output_type=output_type)
1276
- if show_image:
1277
- plt.figure(figsize=(10, 10))
1278
- plt.imshow(image[0])
1279
- plt.axis("off") # Turn off axis numbers and ticks
1280
- plt.show()
1281
- output_images.append(image[0])
1282
-
1283
- # Offload all models
1284
- self.maybe_free_model_hooks()
1285
-
1286
- return output_images
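
End to end, `__call__` returns a list with one image per resolution phase, the last entry being the full-resolution result. A hypothetical usage sketch, assuming this file is loaded as a diffusers community pipeline under the name `pipeline_demofusion_sdxl`; the model id and generation settings are illustrative:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="pipeline_demofusion_sdxl",  # assumed community-pipeline name
    torch_dtype=torch.float16,
).to("cuda")

images = pipe(
    prompt="Astronaut on Mars during sunset",
    height=3072,                 # the larger of height/width must be a multiple of 1024
    width=3072,
    num_inference_steps=50,
    guidance_scale=7.5,
    view_batch_size=16,
    stride=64,
    multi_decoder=True,          # tiled VAE decoding, helpful above ~3072px
    cosine_scale_1=3.0,
    cosine_scale_2=1.0,
    cosine_scale_3=1.0,
    sigma=0.8,
)
images[-1].save("demofusion_3072.png")  # highest-resolution phase output
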
1287
-
1288
- # Override to properly handle the loading and unloading of the additional text encoder.
1289
- def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
1290
- # We could have accessed the unet config from `lora_state_dict()` too. We pass
1291
- # it here explicitly to be able to tell that it's coming from an SDXL
1292
- # pipeline.
1293
-
1294
- # Remove any existing hooks.
1295
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
1296
- from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
1297
- else:
1298
- raise ImportError("Offloading requires `accelerate v0.17.0` or higher.")
1299
-
1300
- is_model_cpu_offload = False
1301
- is_sequential_cpu_offload = False
1302
- recursive = False
1303
- for _, component in self.components.items():
1304
- if isinstance(component, torch.nn.Module):
1305
- if hasattr(component, "_hf_hook"):
1306
- is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
1307
- is_sequential_cpu_offload = (
1308
- isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
1309
- or hasattr(component._hf_hook, "hooks")
1310
- and isinstance(component._hf_hook.hooks[0], AlignDevicesHook)
1311
- )
1312
- logger.info(
1313
- "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
1314
- )
1315
- recursive = is_sequential_cpu_offload
1316
- remove_hook_from_module(component, recurse=recursive)
1317
- state_dict, network_alphas = self.lora_state_dict(
1318
- pretrained_model_name_or_path_or_dict,
1319
- unet_config=self.unet.config,
1320
- **kwargs,
1321
- )
1322
- self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
1323
-
1324
- text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
1325
- if len(text_encoder_state_dict) > 0:
1326
- self.load_lora_into_text_encoder(
1327
- text_encoder_state_dict,
1328
- network_alphas=network_alphas,
1329
- text_encoder=self.text_encoder,
1330
- prefix="text_encoder",
1331
- lora_scale=self.lora_scale,
1332
- )
1333
-
1334
- text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
1335
- if len(text_encoder_2_state_dict) > 0:
1336
- self.load_lora_into_text_encoder(
1337
- text_encoder_2_state_dict,
1338
- network_alphas=network_alphas,
1339
- text_encoder=self.text_encoder_2,
1340
- prefix="text_encoder_2",
1341
- lora_scale=self.lora_scale,
1342
- )
1343
-
1344
- # Offload back.
1345
- if is_model_cpu_offload:
1346
- self.enable_model_cpu_offload()
1347
- elif is_sequential_cpu_offload:
1348
- self.enable_sequential_cpu_offload()
1349
-
1350
- @classmethod
1351
- def save_lora_weights(
1352
- self,
1353
- save_directory: Union[str, os.PathLike],
1354
- unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1355
- text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1356
- text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1357
- is_main_process: bool = True,
1358
- weight_name: str = None,
1359
- save_function: Callable = None,
1360
- safe_serialization: bool = True,
1361
- ):
1362
- state_dict = {}
1363
-
1364
- def pack_weights(layers, prefix):
1365
- layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
1366
- layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
1367
- return layers_state_dict
1368
-
1369
- if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
1370
- raise ValueError(
1371
- "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
1372
- )
1373
-
1374
- if unet_lora_layers:
1375
- state_dict.update(pack_weights(unet_lora_layers, "unet"))
1376
-
1377
- if text_encoder_lora_layers and text_encoder_2_lora_layers:
1378
- state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
1379
- state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
1380
-
1381
- self.write_lora_layers(
1382
- state_dict=state_dict,
1383
- save_directory=save_directory,
1384
- is_main_process=is_main_process,
1385
- weight_name=weight_name,
1386
- save_function=save_function,
1387
- safe_serialization=safe_serialization,
1388
- )
1389
-
1390
- def _remove_text_encoder_monkey_patch(self):
1391
- self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
1392
- self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
pipeline_fabric.py DELETED
@@ -1,751 +0,0 @@
1
- # Copyright 2024 FABRIC authors and the HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- from typing import List, Optional, Union
15
-
16
- import torch
17
- from packaging import version
18
- from PIL import Image
19
- from transformers import CLIPTextModel, CLIPTokenizer
20
-
21
- from diffusers import AutoencoderKL, UNet2DConditionModel
22
- from diffusers.configuration_utils import FrozenDict
23
- from diffusers.image_processor import VaeImageProcessor
24
- from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
25
- from diffusers.models.attention import BasicTransformerBlock
26
- from diffusers.models.attention_processor import LoRAAttnProcessor
27
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline
28
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
29
- from diffusers.schedulers import EulerAncestralDiscreteScheduler, KarrasDiffusionSchedulers
30
- from diffusers.utils import (
31
- deprecate,
32
- logging,
33
- replace_example_docstring,
34
- )
35
- from diffusers.utils.torch_utils import randn_tensor
36
-
37
-
38
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
-
40
- EXAMPLE_DOC_STRING = """
41
- Examples:
42
- ```py
43
- >>> from diffusers import DiffusionPipeline
44
- >>> import torch
45
-
46
- >>> model_id = "dreamlike-art/dreamlike-photoreal-2.0"
47
- >>> pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, custom_pipeline="pipeline_fabric")
48
- >>> pipe = pipe.to("cuda")
49
- >>> prompt = "a giant standing in a fantasy landscape best quality"
50
- >>> liked = [] # list of images for positive feedback
51
- >>> disliked = [] # list of images for negative feedback
52
- >>> image = pipe(prompt, num_images=4, liked=liked, disliked=disliked).images[0]
53
- ```
54
- """
55
-
56
-
57
- class FabricCrossAttnProcessor:
58
- def __init__(self):
59
- self.attention_probs = None
60
-
61
- def __call__(
62
- self,
63
- attn,
64
- hidden_states,
65
- encoder_hidden_states=None,
66
- attention_mask=None,
67
- weights=None,
68
- lora_scale=1.0,
69
- ):
70
- batch_size, sequence_length, _ = (
71
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
72
- )
73
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
74
-
75
- if isinstance(attn.processor, LoRAAttnProcessor):
76
- query = attn.to_q(hidden_states) + lora_scale * attn.processor.to_q_lora(hidden_states)
77
- else:
78
- query = attn.to_q(hidden_states)
79
-
80
- if encoder_hidden_states is None:
81
- encoder_hidden_states = hidden_states
82
- elif attn.norm_cross:
83
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
84
-
85
- if isinstance(attn.processor, LoRAAttnProcessor):
86
- key = attn.to_k(encoder_hidden_states) + lora_scale * attn.processor.to_k_lora(encoder_hidden_states)
87
- value = attn.to_v(encoder_hidden_states) + lora_scale * attn.processor.to_v_lora(encoder_hidden_states)
88
- else:
89
- key = attn.to_k(encoder_hidden_states)
90
- value = attn.to_v(encoder_hidden_states)
91
-
92
- query = attn.head_to_batch_dim(query)
93
- key = attn.head_to_batch_dim(key)
94
- value = attn.head_to_batch_dim(value)
95
-
96
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
97
-
98
- if weights is not None:
99
- if weights.shape[0] != 1:
100
- weights = weights.repeat_interleave(attn.heads, dim=0)
101
- attention_probs = attention_probs * weights[:, None]
102
- attention_probs = attention_probs / attention_probs.sum(dim=-1, keepdim=True)
103
-
104
- hidden_states = torch.bmm(attention_probs, value)
105
- hidden_states = attn.batch_to_head_dim(hidden_states)
106
-
107
- # linear proj
108
- if isinstance(attn.processor, LoRAAttnProcessor):
109
- hidden_states = attn.to_out[0](hidden_states) + lora_scale * attn.processor.to_out_lora(hidden_states)
110
- else:
111
- hidden_states = attn.to_out[0](hidden_states)
112
- # dropout
113
- hidden_states = attn.to_out[1](hidden_states)
114
-
115
- return hidden_states
116
-
117
-
118
- class FabricPipeline(DiffusionPipeline):
119
- r"""
120
- Pipeline for text-to-image generation using Stable Diffusion and conditioning the results using feedback images.
121
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
122
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
123
-
124
- Args:
125
- vae ([`AutoencoderKL`]):
126
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
127
- text_encoder ([`~transformers.CLIPTextModel`]):
128
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
129
- tokenizer ([`~transformers.CLIPTokenizer`]):
130
- A `CLIPTokenizer` to tokenize text.
131
- unet ([`UNet2DConditionModel`]):
132
- A `UNet2DConditionModel` to denoise the encoded image latents.
133
- scheduler ([`EulerAncestralDiscreteScheduler`]):
134
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
135
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
136
- safety_checker ([`StableDiffusionSafetyChecker`]):
137
- Classification module that estimates whether generated images could be considered offensive or harmful.
138
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
139
- about a model's potential harms.
140
- """
141
-
142
- def __init__(
143
- self,
144
- vae: AutoencoderKL,
145
- text_encoder: CLIPTextModel,
146
- tokenizer: CLIPTokenizer,
147
- unet: UNet2DConditionModel,
148
- scheduler: KarrasDiffusionSchedulers,
149
- requires_safety_checker: bool = True,
150
- ):
151
- super().__init__()
152
-
153
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
154
- version.parse(unet.config._diffusers_version).base_version
155
- ) < version.parse("0.9.0.dev0")
156
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
157
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
158
- deprecation_message = (
159
- "The configuration file of the unet has set the default `sample_size` to smaller than"
160
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
161
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
162
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
163
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
164
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
165
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
166
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
167
- " the `unet/config.json` file"
168
- )
169
-
170
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
171
- new_config = dict(unet.config)
172
- new_config["sample_size"] = 64
173
- unet._internal_dict = FrozenDict(new_config)
174
-
175
- self.register_modules(
176
- unet=unet,
177
- vae=vae,
178
- text_encoder=text_encoder,
179
- tokenizer=tokenizer,
180
- scheduler=scheduler,
181
- )
182
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
183
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
184
-
185
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
186
- def _encode_prompt(
187
- self,
188
- prompt,
189
- device,
190
- num_images_per_prompt,
191
- do_classifier_free_guidance,
192
- negative_prompt=None,
193
- prompt_embeds: Optional[torch.Tensor] = None,
194
- negative_prompt_embeds: Optional[torch.Tensor] = None,
195
- lora_scale: Optional[float] = None,
196
- ):
197
- r"""
198
- Encodes the prompt into text encoder hidden states.
199
-
200
- Args:
201
- prompt (`str` or `List[str]`, *optional*):
202
- prompt to be encoded
203
- device: (`torch.device`):
204
- torch device
205
- num_images_per_prompt (`int`):
206
- number of images that should be generated per prompt
207
- do_classifier_free_guidance (`bool`):
208
- whether to use classifier free guidance or not
209
- negative_prompt (`str` or `List[str]`, *optional*):
210
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
211
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
212
- less than `1`).
213
- prompt_embeds (`torch.Tensor`, *optional*):
214
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
215
- provided, text embeddings will be generated from `prompt` input argument.
216
- negative_prompt_embeds (`torch.Tensor`, *optional*):
217
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
218
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
219
- argument.
220
- lora_scale (`float`, *optional*):
221
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
222
- """
223
- # set lora scale so that monkey patched LoRA
224
- # function of text encoder can correctly access it
225
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
226
- self._lora_scale = lora_scale
227
-
228
- if prompt is not None and isinstance(prompt, str):
229
- batch_size = 1
230
- elif prompt is not None and isinstance(prompt, list):
231
- batch_size = len(prompt)
232
- else:
233
- batch_size = prompt_embeds.shape[0]
234
-
235
- if prompt_embeds is None:
236
- # textual inversion: process multi-vector tokens if necessary
237
- if isinstance(self, TextualInversionLoaderMixin):
238
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
239
-
240
- text_inputs = self.tokenizer(
241
- prompt,
242
- padding="max_length",
243
- max_length=self.tokenizer.model_max_length,
244
- truncation=True,
245
- return_tensors="pt",
246
- )
247
- text_input_ids = text_inputs.input_ids
248
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
249
-
250
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
251
- text_input_ids, untruncated_ids
252
- ):
253
- removed_text = self.tokenizer.batch_decode(
254
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
255
- )
256
- logger.warning(
257
- "The following part of your input was truncated because CLIP can only handle sequences up to"
258
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
259
- )
260
-
261
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
262
- attention_mask = text_inputs.attention_mask.to(device)
263
- else:
264
- attention_mask = None
265
-
266
- prompt_embeds = self.text_encoder(
267
- text_input_ids.to(device),
268
- attention_mask=attention_mask,
269
- )
270
- prompt_embeds = prompt_embeds[0]
271
-
272
- if self.text_encoder is not None:
273
- prompt_embeds_dtype = self.text_encoder.dtype
274
- elif self.unet is not None:
275
- prompt_embeds_dtype = self.unet.dtype
276
- else:
277
- prompt_embeds_dtype = prompt_embeds.dtype
278
-
279
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
280
-
281
- bs_embed, seq_len, _ = prompt_embeds.shape
282
- # duplicate text embeddings for each generation per prompt, using mps friendly method
283
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
284
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
285
-
286
- # get unconditional embeddings for classifier free guidance
287
- if do_classifier_free_guidance and negative_prompt_embeds is None:
288
- uncond_tokens: List[str]
289
- if negative_prompt is None:
290
- uncond_tokens = [""] * batch_size
291
- elif prompt is not None and type(prompt) is not type(negative_prompt):
292
- raise TypeError(
293
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
294
- f" {type(prompt)}."
295
- )
296
- elif isinstance(negative_prompt, str):
297
- uncond_tokens = [negative_prompt]
298
- elif batch_size != len(negative_prompt):
299
- raise ValueError(
300
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
301
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
302
- " the batch size of `prompt`."
303
- )
304
- else:
305
- uncond_tokens = negative_prompt
306
-
307
- # textual inversion: process multi-vector tokens if necessary
308
- if isinstance(self, TextualInversionLoaderMixin):
309
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
310
-
311
- max_length = prompt_embeds.shape[1]
312
- uncond_input = self.tokenizer(
313
- uncond_tokens,
314
- padding="max_length",
315
- max_length=max_length,
316
- truncation=True,
317
- return_tensors="pt",
318
- )
319
-
320
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
321
- attention_mask = uncond_input.attention_mask.to(device)
322
- else:
323
- attention_mask = None
324
-
325
- negative_prompt_embeds = self.text_encoder(
326
- uncond_input.input_ids.to(device),
327
- attention_mask=attention_mask,
328
- )
329
- negative_prompt_embeds = negative_prompt_embeds[0]
330
-
331
- if do_classifier_free_guidance:
332
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
333
- seq_len = negative_prompt_embeds.shape[1]
334
-
335
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
336
-
337
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
338
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
339
-
340
- # For classifier free guidance, we need to do two forward passes.
341
- # Here we concatenate the unconditional and text embeddings into a single batch
342
- # to avoid doing two forward passes
343
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
344
-
345
- return prompt_embeds
346
-
347
- def get_unet_hidden_states(self, z_all, t, prompt_embd):
348
- cached_hidden_states = []
349
- for module in self.unet.modules():
350
- if isinstance(module, BasicTransformerBlock):
351
-
352
- def new_forward(self, hidden_states, *args, **kwargs):
353
- cached_hidden_states.append(hidden_states.clone().detach().cpu())
354
- return self.old_forward(hidden_states, *args, **kwargs)
355
-
356
- module.attn1.old_forward = module.attn1.forward
357
- module.attn1.forward = new_forward.__get__(module.attn1)
358
-
359
- # run forward pass to cache hidden states, output can be discarded
360
- _ = self.unet(z_all, t, encoder_hidden_states=prompt_embd)
361
-
362
- # restore original forward pass
363
- for module in self.unet.modules():
364
- if isinstance(module, BasicTransformerBlock):
365
- module.attn1.forward = module.attn1.old_forward
366
- del module.attn1.old_forward
367
-
368
- return cached_hidden_states
369
-
370
- def unet_forward_with_cached_hidden_states(
371
- self,
372
- z_all,
373
- t,
374
- prompt_embd,
375
- cached_pos_hiddens: Optional[List[torch.Tensor]] = None,
376
- cached_neg_hiddens: Optional[List[torch.Tensor]] = None,
377
- pos_weights=(0.8, 0.8),
378
- neg_weights=(0.5, 0.5),
379
- ):
380
- if cached_pos_hiddens is None and cached_neg_hiddens is None:
381
- return self.unet(z_all, t, encoder_hidden_states=prompt_embd)
382
-
383
- local_pos_weights = torch.linspace(*pos_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist()
384
- local_neg_weights = torch.linspace(*neg_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist()
385
- for block, pos_weight, neg_weight in zip(
386
- self.unet.down_blocks + [self.unet.mid_block] + self.unet.up_blocks,
387
- local_pos_weights + [pos_weights[1]] + local_pos_weights[::-1],
388
- local_neg_weights + [neg_weights[1]] + local_neg_weights[::-1],
389
- ):
390
- for module in block.modules():
391
- if isinstance(module, BasicTransformerBlock):
392
-
393
- def new_forward(
394
- self,
395
- hidden_states,
396
- pos_weight=pos_weight,
397
- neg_weight=neg_weight,
398
- **kwargs,
399
- ):
400
- cond_hiddens, uncond_hiddens = hidden_states.chunk(2, dim=0)
401
- batch_size, d_model = cond_hiddens.shape[:2]
402
- device, dtype = hidden_states.device, hidden_states.dtype
403
-
404
- weights = torch.ones(batch_size, d_model, device=device, dtype=dtype)
405
- out_pos = self.old_forward(hidden_states)
406
- out_neg = self.old_forward(hidden_states)
407
-
408
- if cached_pos_hiddens is not None:
409
- cached_pos_hs = cached_pos_hiddens.pop(0).to(hidden_states.device)
410
- cond_pos_hs = torch.cat([cond_hiddens, cached_pos_hs], dim=1)
411
- pos_weights = weights.clone().repeat(1, 1 + cached_pos_hs.shape[1] // d_model)
412
- pos_weights[:, d_model:] = pos_weight
413
- attn_with_weights = FabricCrossAttnProcessor()
414
- out_pos = attn_with_weights(
415
- self,
416
- cond_hiddens,
417
- encoder_hidden_states=cond_pos_hs,
418
- weights=pos_weights,
419
- )
420
- else:
421
- out_pos = self.old_forward(cond_hiddens)
422
-
423
- if cached_neg_hiddens is not None:
424
- cached_neg_hs = cached_neg_hiddens.pop(0).to(hidden_states.device)
425
- uncond_neg_hs = torch.cat([uncond_hiddens, cached_neg_hs], dim=1)
426
- neg_weights = weights.clone().repeat(1, 1 + cached_neg_hs.shape[1] // d_model)
427
- neg_weights[:, d_model:] = neg_weight
428
- attn_with_weights = FabricCrossAttnProcessor()
429
- out_neg = attn_with_weights(
430
- self,
431
- uncond_hiddens,
432
- encoder_hidden_states=uncond_neg_hs,
433
- weights=neg_weights,
434
- )
435
- else:
436
- out_neg = self.old_forward(uncond_hiddens)
437
-
438
- out = torch.cat([out_pos, out_neg], dim=0)
439
- return out
440
-
441
- module.attn1.old_forward = module.attn1.forward
442
- module.attn1.forward = new_forward.__get__(module.attn1)
443
-
444
- out = self.unet(z_all, t, encoder_hidden_states=prompt_embd)
445
-
446
- # restore original forward pass
447
- for module in self.unet.modules():
448
- if isinstance(module, BasicTransformerBlock):
449
- module.attn1.forward = module.attn1.old_forward
450
- del module.attn1.old_forward
451
-
452
- return out
453
-
454
- def preprocess_feedback_images(self, images, vae, dim, device, dtype, generator) -> torch.Tensor:
455
- images_t = [self.image_to_tensor(img, dim, dtype) for img in images]
456
- images_t = torch.stack(images_t).to(device)
457
- latents = vae.config.scaling_factor * vae.encode(images_t).latent_dist.sample(generator)
458
-
459
- return torch.cat([latents], dim=0)
460
-
461
- def check_inputs(
462
- self,
463
- prompt,
464
- negative_prompt=None,
465
- liked=None,
466
- disliked=None,
467
- height=None,
468
- width=None,
469
- ):
470
- if prompt is None:
471
- raise ValueError("Provide `prompt`. Cannot leave `prompt` undefined.")
472
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
473
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
474
-
475
- if negative_prompt is not None and (
476
- not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list)
477
- ):
478
- raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")
479
-
480
- if liked is not None and not isinstance(liked, list):
481
- raise ValueError(f"`liked` has to be of type `list` but is {type(liked)}")
482
-
483
- if disliked is not None and not isinstance(disliked, list):
484
- raise ValueError(f"`disliked` has to be of type `list` but is {type(disliked)}")
485
-
486
- if height is not None and not isinstance(height, int):
487
- raise ValueError(f"`height` has to be of type `int` but is {type(height)}")
488
-
489
- if width is not None and not isinstance(width, int):
490
- raise ValueError(f"`width` has to be of type `int` but is {type(width)}")
491
-
492
- @torch.no_grad()
493
- @replace_example_docstring(EXAMPLE_DOC_STRING)
494
- def __call__(
495
- self,
496
- prompt: Optional[Union[str, List[str]]] = "",
497
- negative_prompt: Optional[Union[str, List[str]]] = "lowres, bad anatomy, bad hands, cropped, worst quality",
498
- liked: Optional[Union[List[str], List[Image.Image]]] = [],
499
- disliked: Optional[Union[List[str], List[Image.Image]]] = [],
500
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
501
- height: int = 512,
502
- width: int = 512,
503
- return_dict: bool = True,
504
- num_images: int = 4,
505
- guidance_scale: float = 7.0,
506
- num_inference_steps: int = 20,
507
- output_type: Optional[str] = "pil",
508
- feedback_start_ratio: float = 0.33,
509
- feedback_end_ratio: float = 0.66,
510
- min_weight: float = 0.05,
511
- max_weight: float = 0.8,
512
- neg_scale: float = 0.5,
513
- pos_bottleneck_scale: float = 1.0,
514
- neg_bottleneck_scale: float = 1.0,
515
- latents: Optional[torch.Tensor] = None,
516
- ):
517
- r"""
518
- The call function to the pipeline for generation. Generate a trajectory of images with binary feedback. The
519
- feedback can be given as a list of liked and disliked images.
520
-
521
- Args:
522
- prompt (`str` or `List[str]`, *optional*):
523
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`
524
- instead.
525
- negative_prompt (`str` or `List[str]`, *optional*):
526
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
527
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
528
- liked (`List[Image.Image]` or `List[str]`, *optional*):
529
- Encourages images with liked features.
530
- disliked (`List[Image.Image]` or `List[str]`, *optional*):
531
- Discourages images with disliked features.
532
- generator (`torch.Generator` or `List[torch.Generator]` or `int`, *optional*):
533
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) or an `int` to
534
- make generation deterministic.
535
- height (`int`, *optional*, defaults to 512):
536
- Height of the generated image.
537
- width (`int`, *optional*, defaults to 512):
538
- Width of the generated image.
539
- num_images (`int`, *optional*, defaults to 4):
540
- The number of images to generate per prompt.
541
- guidance_scale (`float`, *optional*, defaults to 7.0):
542
- A higher guidance scale value encourages the model to generate images closely linked to the text
543
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
544
- num_inference_steps (`int`, *optional*, defaults to 20):
545
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
546
- expense of slower inference.
547
- output_type (`str`, *optional*, defaults to `"pil"`):
548
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
549
- return_dict (`bool`, *optional*, defaults to `True`):
550
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
551
- plain tuple.
552
- feedback_start_ratio (`float`, *optional*, defaults to `.33`):
553
- Start point for providing feedback (between 0 and 1).
554
- feedback_end_ratio (`float`, *optional*, defaults to `.66`):
555
- End point for providing feedback (between 0 and 1).
556
- min_weight (`float`, *optional*, defaults to `.05`):
557
- Minimum weight for feedback.
558
- max_weight (`float`, *optional*, defaults to `0.8`):
559
- Maximum weight for feedback.
560
- neg_scale (`float`, *optional*, defaults to `.5`):
561
- Scale factor for negative feedback.
562
-
563
- Examples:
564
-
565
- Returns:
566
- [`~pipelines.fabric.FabricPipelineOutput`] or `tuple`:
567
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
568
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
569
- second element is a list of `bool`s indicating whether the corresponding generated image contains
570
- "not-safe-for-work" (nsfw) content.
571
-
572
- """
573
-
574
- self.check_inputs(prompt, negative_prompt, liked, disliked)
575
-
576
- device = self._execution_device
577
- dtype = self.unet.dtype
578
-
579
- if isinstance(prompt, str) and prompt is not None:
580
- batch_size = 1
581
- elif isinstance(prompt, list) and prompt is not None:
582
- batch_size = len(prompt)
583
- else:
584
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
585
-
586
- if isinstance(negative_prompt, str):
587
- negative_prompt = negative_prompt
588
- elif isinstance(negative_prompt, list):
589
- negative_prompt = negative_prompt
590
- else:
591
- assert len(negative_prompt) == batch_size
592
-
593
- shape = (
594
- batch_size * num_images,
595
- self.unet.config.in_channels,
596
- height // self.vae_scale_factor,
597
- width // self.vae_scale_factor,
598
- )
599
- latent_noise = randn_tensor(
600
- shape,
601
- device=device,
602
- dtype=dtype,
603
- generator=generator,
604
- )
605
-
606
- positive_latents = (
607
- self.preprocess_feedback_images(liked, self.vae, (height, width), device, dtype, generator)
608
- if liked and len(liked) > 0
609
- else torch.tensor(
610
- [],
611
- device=device,
612
- dtype=dtype,
613
- )
614
- )
615
- negative_latents = (
616
- self.preprocess_feedback_images(disliked, self.vae, (height, width), device, dtype, generator)
617
- if disliked and len(disliked) > 0
618
- else torch.tensor(
619
- [],
620
- device=device,
621
- dtype=dtype,
622
- )
623
- )
624
-
625
- do_classifier_free_guidance = guidance_scale > 0.1
626
-
627
- (prompt_neg_embs, prompt_pos_embs) = self._encode_prompt(
628
- prompt,
629
- device,
630
- num_images,
631
- do_classifier_free_guidance,
632
- negative_prompt,
633
- ).split([num_images * batch_size, num_images * batch_size])
634
-
635
- batched_prompt_embd = torch.cat([prompt_pos_embs, prompt_neg_embs], dim=0)
636
-
637
- null_tokens = self.tokenizer(
638
- [""],
639
- return_tensors="pt",
640
- max_length=self.tokenizer.model_max_length,
641
- padding="max_length",
642
- truncation=True,
643
- )
644
-
645
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
646
- attention_mask = null_tokens.attention_mask.to(device)
647
- else:
648
- attention_mask = None
649
-
650
- null_prompt_emb = self.text_encoder(
651
- input_ids=null_tokens.input_ids.to(device),
652
- attention_mask=attention_mask,
653
- ).last_hidden_state
654
-
655
- null_prompt_emb = null_prompt_emb.to(device=device, dtype=dtype)
656
-
657
- self.scheduler.set_timesteps(num_inference_steps, device=device)
658
- timesteps = self.scheduler.timesteps
659
- latent_noise = latent_noise * self.scheduler.init_noise_sigma
660
-
661
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
662
-
663
- ref_start_idx = round(len(timesteps) * feedback_start_ratio)
664
- ref_end_idx = round(len(timesteps) * feedback_end_ratio)
665
-
666
- with self.progress_bar(total=num_inference_steps) as pbar:
667
- for i, t in enumerate(timesteps):
668
- sigma = self.scheduler.sigma_t[t] if hasattr(self.scheduler, "sigma_t") else 0
669
- if hasattr(self.scheduler, "sigmas"):
670
- sigma = self.scheduler.sigmas[i]
671
-
672
- alpha_hat = 1 / (sigma**2 + 1)
673
-
674
- z_single = self.scheduler.scale_model_input(latent_noise, t)
675
- z_all = torch.cat([z_single] * 2, dim=0)
676
- z_ref = torch.cat([positive_latents, negative_latents], dim=0)
677
-
678
- if i >= ref_start_idx and i <= ref_end_idx:
679
- weight_factor = max_weight
680
- else:
681
- weight_factor = min_weight
682
-
683
- pos_ws = (weight_factor, weight_factor * pos_bottleneck_scale)
684
- neg_ws = (weight_factor * neg_scale, weight_factor * neg_scale * neg_bottleneck_scale)
685
-
686
- if z_ref.size(0) > 0 and weight_factor > 0:
687
- noise = torch.randn_like(z_ref)
688
- if isinstance(self.scheduler, EulerAncestralDiscreteScheduler):
689
- z_ref_noised = (alpha_hat**0.5 * z_ref + (1 - alpha_hat) ** 0.5 * noise).type(dtype)
690
- else:
691
- z_ref_noised = self.scheduler.add_noise(z_ref, noise, t)
692
-
693
- ref_prompt_embd = torch.cat(
694
- [null_prompt_emb] * (len(positive_latents) + len(negative_latents)), dim=0
695
- )
696
- cached_hidden_states = self.get_unet_hidden_states(z_ref_noised, t, ref_prompt_embd)
697
-
698
- n_pos, n_neg = positive_latents.shape[0], negative_latents.shape[0]
699
- cached_pos_hs, cached_neg_hs = [], []
700
- for hs in cached_hidden_states:
701
- cached_pos, cached_neg = hs.split([n_pos, n_neg], dim=0)
702
- cached_pos = cached_pos.view(1, -1, *cached_pos.shape[2:]).expand(num_images, -1, -1)
703
- cached_neg = cached_neg.view(1, -1, *cached_neg.shape[2:]).expand(num_images, -1, -1)
704
- cached_pos_hs.append(cached_pos)
705
- cached_neg_hs.append(cached_neg)
706
-
707
- if n_pos == 0:
708
- cached_pos_hs = None
709
- if n_neg == 0:
710
- cached_neg_hs = None
711
- else:
712
- cached_pos_hs, cached_neg_hs = None, None
713
- unet_out = self.unet_forward_with_cached_hidden_states(
714
- z_all,
715
- t,
716
- prompt_embd=batched_prompt_embd,
717
- cached_pos_hiddens=cached_pos_hs,
718
- cached_neg_hiddens=cached_neg_hs,
719
- pos_weights=pos_ws,
720
- neg_weights=neg_ws,
721
- )[0]
722
-
723
- noise_cond, noise_uncond = unet_out.chunk(2)
724
- guidance = noise_cond - noise_uncond
725
- noise_pred = noise_uncond + guidance_scale * guidance
726
- latent_noise = self.scheduler.step(noise_pred, t, latent_noise)[0]
727
-
728
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
729
- pbar.update()
730
-
731
- y = self.vae.decode(latent_noise / self.vae.config.scaling_factor, return_dict=False)[0]
732
- imgs = self.image_processor.postprocess(
733
- y,
734
- output_type=output_type,
735
- )
736
-
737
- if not return_dict:
738
- return imgs
739
-
740
- return StableDiffusionPipelineOutput(imgs, False)
741
-
742
- def image_to_tensor(self, image: Union[str, Image.Image], dim: tuple, dtype):
743
- """
744
- Convert a PIL image (or a path to an image file) to a torch tensor for further processing.
745
- """
746
- if isinstance(image, str):
747
- image = Image.open(image)
748
- if not image.mode == "RGB":
749
- image = image.convert("RGB")
750
- image = self.image_processor.preprocess(image, height=dim[0], width=dim[1])[0]
751
- return image.type(dtype)
pipeline_null_text_inversion.py DELETED
@@ -1,260 +0,0 @@
1
- import inspect
2
- import os
3
-
4
- import numpy as np
5
- import torch
6
- import torch.nn.functional as nnf
7
- from PIL import Image
8
- from torch.optim.adam import Adam
9
- from tqdm import tqdm
10
-
11
- from diffusers import StableDiffusionPipeline
12
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
13
-
14
-
15
- def retrieve_timesteps(
16
- scheduler,
17
- num_inference_steps=None,
18
- device=None,
19
- timesteps=None,
20
- **kwargs,
21
- ):
22
- """
23
- Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
24
- custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
25
- Args:
26
- scheduler (`SchedulerMixin`):
27
- The scheduler to get timesteps from.
28
- num_inference_steps (`int`):
29
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
30
- `timesteps` must be `None`.
31
- device (`str` or `torch.device`, *optional*):
32
- The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
33
- timesteps (`List[int]`, *optional*):
34
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
35
- timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
36
- must be `None`.
37
-
38
- Returns:
39
- `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
40
- second element is the number of inference steps.
41
- """
42
- if timesteps is not None:
43
- accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
44
- if not accepts_timesteps:
45
- raise ValueError(
46
- f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
47
- f" timestep schedules. Please check whether you are using the correct scheduler."
48
- )
49
- scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
50
- timesteps = scheduler.timesteps
51
- num_inference_steps = len(timesteps)
52
- else:
53
- scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
54
- timesteps = scheduler.timesteps
55
- return timesteps, num_inference_steps
56
-
57
-
58
- class NullTextPipeline(StableDiffusionPipeline):
59
- def get_noise_pred(self, latents, t, context):
60
- latents_input = torch.cat([latents] * 2)
61
- guidance_scale = 7.5
62
- noise_pred = self.unet(latents_input, t, encoder_hidden_states=context)["sample"]
63
- noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)
64
- noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
65
- latents = self.prev_step(noise_pred, t, latents)
66
- return latents
67
-
68
- def get_noise_pred_single(self, latents, t, context):
69
- noise_pred = self.unet(latents, t, encoder_hidden_states=context)["sample"]
70
- return noise_pred
71
-
72
- @torch.no_grad()
73
- def image2latent(self, image_path):
74
- image = Image.open(image_path).convert("RGB")
75
- image = np.array(image)
76
- image = torch.from_numpy(image).float() / 127.5 - 1
77
- image = image.permute(2, 0, 1).unsqueeze(0).to(self.device)
78
- latents = self.vae.encode(image)["latent_dist"].mean
79
- latents = latents * 0.18215
80
- return latents
81
-
82
- @torch.no_grad()
83
- def latent2image(self, latents):
84
- latents = 1 / 0.18215 * latents.detach()
85
- image = self.vae.decode(latents)["sample"].detach()
86
- image = self.image_processor.postprocess(image, output_type="pil")[0]
87
- return image
88
-
89
- def prev_step(self, model_output, timestep, sample):
90
- prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
91
- alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
92
- alpha_prod_t_prev = (
93
- self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod
94
- )
95
- beta_prod_t = 1 - alpha_prod_t
96
- pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
97
- pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
98
- prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction
99
- return prev_sample
100
-
101
- def next_step(self, model_output, timestep, sample):
102
- timestep, next_timestep = (
103
- min(timestep - self.scheduler.config.num_train_timesteps // self.num_inference_steps, 999),
104
- timestep,
105
- )
106
- alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
107
- alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep]
108
- beta_prod_t = 1 - alpha_prod_t
109
- next_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
110
- next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
111
- next_sample = alpha_prod_t_next**0.5 * next_original_sample + next_sample_direction
112
- return next_sample
113
-
114
- def null_optimization(self, latents, context, num_inner_steps, epsilon):
115
- uncond_embeddings, cond_embeddings = context.chunk(2)
116
- uncond_embeddings_list = []
117
- latent_cur = latents[-1]
118
- bar = tqdm(total=num_inner_steps * self.num_inference_steps)
119
- for i in range(self.num_inference_steps):
120
- uncond_embeddings = uncond_embeddings.clone().detach()
121
- uncond_embeddings.requires_grad = True
122
- optimizer = Adam([uncond_embeddings], lr=1e-2 * (1.0 - i / 100.0))
123
- latent_prev = latents[len(latents) - i - 2]
124
- t = self.scheduler.timesteps[i]
125
- with torch.no_grad():
126
- noise_pred_cond = self.get_noise_pred_single(latent_cur, t, cond_embeddings)
127
- for j in range(num_inner_steps):
128
- noise_pred_uncond = self.get_noise_pred_single(latent_cur, t, uncond_embeddings)
129
- noise_pred = noise_pred_uncond + 7.5 * (noise_pred_cond - noise_pred_uncond)
130
- latents_prev_rec = self.prev_step(noise_pred, t, latent_cur)
131
- loss = nnf.mse_loss(latents_prev_rec, latent_prev)
132
- optimizer.zero_grad()
133
- loss.backward()
134
- optimizer.step()
135
- loss_item = loss.item()
136
- bar.update()
137
- if loss_item < epsilon + i * 2e-5:
138
- break
139
- for j in range(j + 1, num_inner_steps):
140
- bar.update()
141
- uncond_embeddings_list.append(uncond_embeddings[:1].detach())
142
- with torch.no_grad():
143
- context = torch.cat([uncond_embeddings, cond_embeddings])
144
- latent_cur = self.get_noise_pred(latent_cur, t, context)
145
- bar.close()
146
- return uncond_embeddings_list
147
-
148
- @torch.no_grad()
149
- def ddim_inversion_loop(self, latent, context):
150
- self.scheduler.set_timesteps(self.num_inference_steps)
151
- _, cond_embeddings = context.chunk(2)
152
- all_latent = [latent]
153
- latent = latent.clone().detach()
154
- with torch.no_grad():
155
- for i in range(0, self.num_inference_steps):
156
- t = self.scheduler.timesteps[len(self.scheduler.timesteps) - i - 1]
157
- noise_pred = self.unet(latent, t, encoder_hidden_states=cond_embeddings)["sample"]
158
- latent = self.next_step(noise_pred, t, latent)
159
- all_latent.append(latent)
160
- return all_latent
161
-
162
- def get_context(self, prompt):
163
- uncond_input = self.tokenizer(
164
- [""], padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt"
165
- )
166
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
167
- text_input = self.tokenizer(
168
- [prompt],
169
- padding="max_length",
170
- max_length=self.tokenizer.model_max_length,
171
- truncation=True,
172
- return_tensors="pt",
173
- )
174
- text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
175
- context = torch.cat([uncond_embeddings, text_embeddings])
176
- return context
177
-
178
- def invert(
179
- self, image_path: str, prompt: str, num_inner_steps=10, early_stop_epsilon=1e-6, num_inference_steps=50
180
- ):
181
- self.num_inference_steps = num_inference_steps
182
- context = self.get_context(prompt)
183
- latent = self.image2latent(image_path)
184
- ddim_latents = self.ddim_inversion_loop(latent, context)
185
- if os.path.exists(image_path + ".pt"):
186
- uncond_embeddings = torch.load(image_path + ".pt")
187
- else:
188
- uncond_embeddings = self.null_optimization(ddim_latents, context, num_inner_steps, early_stop_epsilon)
189
- uncond_embeddings = torch.stack(uncond_embeddings, 0)
190
- torch.save(uncond_embeddings, image_path + ".pt")
191
- return ddim_latents[-1], uncond_embeddings
192
-
193
- @torch.no_grad()
194
- def __call__(
195
- self,
196
- prompt,
197
- uncond_embeddings,
198
- inverted_latent,
199
- num_inference_steps: int = 50,
200
- timesteps=None,
201
- guidance_scale=7.5,
202
- negative_prompt=None,
203
- num_images_per_prompt=1,
204
- generator=None,
205
- latents=None,
206
- prompt_embeds=None,
207
- negative_prompt_embeds=None,
208
- output_type="pil",
209
- ):
210
- self._guidance_scale = guidance_scale
211
- # 0. Default height and width to unet
212
- height = self.unet.config.sample_size * self.vae_scale_factor
213
- width = self.unet.config.sample_size * self.vae_scale_factor
214
- # to deal with lora scaling and other possible forward hook
215
- callback_steps = None
216
- # 1. Check inputs. Raise error if not correct
217
- self.check_inputs(
218
- prompt,
219
- height,
220
- width,
221
- callback_steps,
222
- negative_prompt,
223
- prompt_embeds,
224
- negative_prompt_embeds,
225
- )
226
- # 2. Define call parameter
227
- device = self._execution_device
228
- # 3. Encode input prompt
229
- prompt_embeds, _ = self.encode_prompt(
230
- prompt,
231
- device,
232
- num_images_per_prompt,
233
- self.do_classifier_free_guidance,
234
- negative_prompt,
235
- prompt_embeds=prompt_embeds,
236
- negative_prompt_embeds=negative_prompt_embeds,
237
- )
238
- # 4. Prepare timesteps
239
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
240
- latents = inverted_latent
241
- with self.progress_bar(total=num_inference_steps) as progress_bar:
242
- for i, t in enumerate(timesteps):
243
- noise_pred_uncond = self.unet(latents, t, encoder_hidden_states=uncond_embeddings[i])["sample"]
244
- noise_pred = self.unet(latents, t, encoder_hidden_states=prompt_embeds)["sample"]
245
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
246
- # compute the previous noisy sample x_t -> x_t-1
247
- latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
248
- progress_bar.update()
249
- if not output_type == "latent":
250
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
251
- 0
252
- ]
253
- else:
254
- image = latents
255
- image = self.image_processor.postprocess(
256
- image, output_type=output_type, do_denormalize=[True] * image.shape[0]
257
- )
258
- # Offload all models
259
- self.maybe_free_model_hooks()
260
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=False)
pipeline_prompt2prompt.py DELETED
@@ -1,1422 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from __future__ import annotations
16
-
17
- import abc
18
- import inspect
19
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
20
-
21
- import numpy as np
22
- import torch
23
- import torch.nn.functional as F
24
- from packaging import version
25
- from transformers import (
26
- CLIPImageProcessor,
27
- CLIPTextModel,
28
- CLIPTokenizer,
29
- CLIPVisionModelWithProjection,
30
- )
31
-
32
- from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel
33
- from diffusers.configuration_utils import FrozenDict, deprecate
34
- from diffusers.image_processor import VaeImageProcessor
35
- from diffusers.loaders import (
36
- FromSingleFileMixin,
37
- IPAdapterMixin,
38
- LoraLoaderMixin,
39
- TextualInversionLoaderMixin,
40
- )
41
- from diffusers.models.attention import Attention
42
- from diffusers.models.lora import adjust_lora_scale_text_encoder
43
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
44
- from diffusers.pipelines.stable_diffusion.safety_checker import (
45
- StableDiffusionSafetyChecker,
46
- )
47
- from diffusers.schedulers import KarrasDiffusionSchedulers
48
- from diffusers.utils import (
49
- USE_PEFT_BACKEND,
50
- logging,
51
- scale_lora_layers,
52
- unscale_lora_layers,
53
- )
54
- from diffusers.utils.torch_utils import randn_tensor
55
-
56
-
57
- logger = logging.get_logger(__name__)
58
-
59
-
60
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
61
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
62
- """
63
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
64
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
65
- """
66
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
67
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
68
- # rescale the results from guidance (fixes overexposure)
69
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
70
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
71
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
72
- return noise_cfg
73
-
74
-
75
- class Prompt2PromptPipeline(
76
- DiffusionPipeline,
77
- TextualInversionLoaderMixin,
78
- LoraLoaderMixin,
79
- IPAdapterMixin,
80
- FromSingleFileMixin,
81
- ):
82
- r"""
83
- Pipeline for text-to-image generation using Stable Diffusion.
84
-
85
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
86
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
87
-
88
- The pipeline also inherits the following loading methods:
89
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
90
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
91
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
92
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
93
- - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
94
-
95
- Args:
96
- vae ([`AutoencoderKL`]):
97
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
98
- text_encoder ([`~transformers.CLIPTextModel`]):
99
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
100
- tokenizer ([`~transformers.CLIPTokenizer`]):
101
- A `CLIPTokenizer` to tokenize text.
102
- unet ([`UNet2DConditionModel`]):
103
- A `UNet2DConditionModel` to denoise the encoded image latents.
104
- scheduler ([`SchedulerMixin`]):
105
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
106
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
107
- safety_checker ([`StableDiffusionSafetyChecker`]):
108
- Classification module that estimates whether generated images could be considered offensive or harmful.
109
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
110
- about a model's potential harms.
111
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
112
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
113
- """
114
-
115
- model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
116
- _exclude_from_cpu_offload = ["safety_checker"]
117
- _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
118
- _optional_components = ["safety_checker", "feature_extractor"]
119
-
120
- def __init__(
121
- self,
122
- vae: AutoencoderKL,
123
- text_encoder: CLIPTextModel,
124
- tokenizer: CLIPTokenizer,
125
- unet: UNet2DConditionModel,
126
- scheduler: KarrasDiffusionSchedulers,
127
- safety_checker: StableDiffusionSafetyChecker,
128
- feature_extractor: CLIPImageProcessor,
129
- image_encoder: CLIPVisionModelWithProjection = None,
130
- requires_safety_checker: bool = True,
131
- ):
132
- super().__init__()
133
-
134
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
135
- deprecation_message = (
136
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
137
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
138
- "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
139
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
140
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
141
- " file"
142
- )
143
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
144
- new_config = dict(scheduler.config)
145
- new_config["steps_offset"] = 1
146
- scheduler._internal_dict = FrozenDict(new_config)
147
-
148
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
149
- deprecation_message = (
150
- f"The configuration file of this scheduler: {scheduler} has `clip_sample` set to `True`."
151
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
152
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
153
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
154
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
155
- )
156
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
157
- new_config = dict(scheduler.config)
158
- new_config["clip_sample"] = False
159
- scheduler._internal_dict = FrozenDict(new_config)
160
-
161
- if safety_checker is None and requires_safety_checker:
162
- logger.warning(
163
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
164
- " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
165
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
166
- " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
167
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
168
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
169
- )
170
-
171
- if safety_checker is not None and feature_extractor is None:
172
- raise ValueError(
173
- f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
174
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
175
- )
176
-
177
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
178
- version.parse(unet.config._diffusers_version).base_version
179
- ) < version.parse("0.9.0.dev0")
180
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
181
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
182
- deprecation_message = (
183
- "The configuration file of the unet has set the default `sample_size` to smaller than"
184
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
185
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
186
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
187
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
188
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
189
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
190
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
191
- " the `unet/config.json` file"
192
- )
193
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
194
- new_config = dict(unet.config)
195
- new_config["sample_size"] = 64
196
- unet._internal_dict = FrozenDict(new_config)
197
-
198
- self.register_modules(
199
- vae=vae,
200
- text_encoder=text_encoder,
201
- tokenizer=tokenizer,
202
- unet=unet,
203
- scheduler=scheduler,
204
- safety_checker=safety_checker,
205
- feature_extractor=feature_extractor,
206
- image_encoder=image_encoder,
207
- )
208
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
209
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
210
- self.register_to_config(requires_safety_checker=requires_safety_checker)
211
-
212
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
213
- def _encode_prompt(
214
- self,
215
- prompt,
216
- device,
217
- num_images_per_prompt,
218
- do_classifier_free_guidance,
219
- negative_prompt=None,
220
- prompt_embeds: Optional[torch.Tensor] = None,
221
- negative_prompt_embeds: Optional[torch.Tensor] = None,
222
- lora_scale: Optional[float] = None,
223
- **kwargs,
224
- ):
225
- deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
226
- deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
227
-
228
- prompt_embeds_tuple = self.encode_prompt(
229
- prompt=prompt,
230
- device=device,
231
- num_images_per_prompt=num_images_per_prompt,
232
- do_classifier_free_guidance=do_classifier_free_guidance,
233
- negative_prompt=negative_prompt,
234
- prompt_embeds=prompt_embeds,
235
- negative_prompt_embeds=negative_prompt_embeds,
236
- lora_scale=lora_scale,
237
- **kwargs,
238
- )
239
-
240
- # concatenate for backwards comp
241
- prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
242
-
243
- return prompt_embeds
244
-
245
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
246
- def encode_prompt(
247
- self,
248
- prompt,
249
- device,
250
- num_images_per_prompt,
251
- do_classifier_free_guidance,
252
- negative_prompt=None,
253
- prompt_embeds: Optional[torch.Tensor] = None,
254
- negative_prompt_embeds: Optional[torch.Tensor] = None,
255
- lora_scale: Optional[float] = None,
256
- clip_skip: Optional[int] = None,
257
- ):
258
- r"""
259
- Encodes the prompt into text encoder hidden states.
260
-
261
- Args:
262
- prompt (`str` or `List[str]`, *optional*):
263
- prompt to be encoded
264
- device: (`torch.device`):
265
- torch device
266
- num_images_per_prompt (`int`):
267
- number of images that should be generated per prompt
268
- do_classifier_free_guidance (`bool`):
269
- whether to use classifier free guidance or not
270
- negative_prompt (`str` or `List[str]`, *optional*):
271
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
272
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
273
- less than `1`).
274
- prompt_embeds (`torch.Tensor`, *optional*):
275
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
276
- provided, text embeddings will be generated from `prompt` input argument.
277
- negative_prompt_embeds (`torch.Tensor`, *optional*):
278
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
279
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
280
- argument.
281
- lora_scale (`float`, *optional*):
282
- A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
283
- clip_skip (`int`, *optional*):
284
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
285
- the output of the pre-final layer will be used for computing the prompt embeddings.
286
- """
287
- # set lora scale so that monkey patched LoRA
288
- # function of text encoder can correctly access it
289
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
290
- self._lora_scale = lora_scale
291
-
292
- # dynamically adjust the LoRA scale
293
- if not USE_PEFT_BACKEND:
294
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
295
- else:
296
- scale_lora_layers(self.text_encoder, lora_scale)
297
-
298
- if prompt is not None and isinstance(prompt, str):
299
- batch_size = 1
300
- elif prompt is not None and isinstance(prompt, list):
301
- batch_size = len(prompt)
302
- else:
303
- batch_size = prompt_embeds.shape[0]
304
-
305
- if prompt_embeds is None:
306
- # textual inversion: process multi-vector tokens if necessary
307
- if isinstance(self, TextualInversionLoaderMixin):
308
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
309
-
310
- text_inputs = self.tokenizer(
311
- prompt,
312
- padding="max_length",
313
- max_length=self.tokenizer.model_max_length,
314
- truncation=True,
315
- return_tensors="pt",
316
- )
317
- text_input_ids = text_inputs.input_ids
318
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
319
-
320
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
321
- text_input_ids, untruncated_ids
322
- ):
323
- removed_text = self.tokenizer.batch_decode(
324
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
325
- )
326
- logger.warning(
327
- "The following part of your input was truncated because CLIP can only handle sequences up to"
328
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
329
- )
330
-
331
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
332
- attention_mask = text_inputs.attention_mask.to(device)
333
- else:
334
- attention_mask = None
335
-
336
- if clip_skip is None:
337
- prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
338
- prompt_embeds = prompt_embeds[0]
339
- else:
340
- prompt_embeds = self.text_encoder(
341
- text_input_ids.to(device),
342
- attention_mask=attention_mask,
343
- output_hidden_states=True,
344
- )
345
- # Access the `hidden_states` first, that contains a tuple of
346
- # all the hidden states from the encoder layers. Then index into
347
- # the tuple to access the hidden states from the desired layer.
348
- prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
349
- # We also need to apply the final LayerNorm here to not mess with the
350
- # representations. The `last_hidden_states` that we typically use for
351
- # obtaining the final prompt representations passes through the LayerNorm
352
- # layer.
353
- prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
354
-
355
- if self.text_encoder is not None:
356
- prompt_embeds_dtype = self.text_encoder.dtype
357
- elif self.unet is not None:
358
- prompt_embeds_dtype = self.unet.dtype
359
- else:
360
- prompt_embeds_dtype = prompt_embeds.dtype
361
-
362
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
363
-
364
- bs_embed, seq_len, _ = prompt_embeds.shape
365
- # duplicate text embeddings for each generation per prompt, using mps friendly method
366
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
367
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
368
-
369
- # get unconditional embeddings for classifier free guidance
370
- if do_classifier_free_guidance and negative_prompt_embeds is None:
371
- uncond_tokens: List[str]
372
- if negative_prompt is None:
373
- uncond_tokens = [""] * batch_size
374
- elif prompt is not None and type(prompt) is not type(negative_prompt):
375
- raise TypeError(
376
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
377
- f" {type(prompt)}."
378
- )
379
- elif isinstance(negative_prompt, str):
380
- uncond_tokens = [negative_prompt]
381
- elif batch_size != len(negative_prompt):
382
- raise ValueError(
383
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
384
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
385
- " the batch size of `prompt`."
386
- )
387
- else:
388
- uncond_tokens = negative_prompt
389
-
390
- # textual inversion: process multi-vector tokens if necessary
391
- if isinstance(self, TextualInversionLoaderMixin):
392
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
393
-
394
- max_length = prompt_embeds.shape[1]
395
- uncond_input = self.tokenizer(
396
- uncond_tokens,
397
- padding="max_length",
398
- max_length=max_length,
399
- truncation=True,
400
- return_tensors="pt",
401
- )
402
-
403
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
404
- attention_mask = uncond_input.attention_mask.to(device)
405
- else:
406
- attention_mask = None
407
-
408
- negative_prompt_embeds = self.text_encoder(
409
- uncond_input.input_ids.to(device),
410
- attention_mask=attention_mask,
411
- )
412
- negative_prompt_embeds = negative_prompt_embeds[0]
413
-
414
- if do_classifier_free_guidance:
415
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
416
- seq_len = negative_prompt_embeds.shape[1]
417
-
418
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
419
-
420
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
421
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
422
-
423
- if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
424
- # Retrieve the original scale by scaling back the LoRA layers
425
- unscale_lora_layers(self.text_encoder, lora_scale)
426
-
427
- return prompt_embeds, negative_prompt_embeds
428
-
429
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
430
- def run_safety_checker(self, image, device, dtype):
431
- if self.safety_checker is None:
432
- has_nsfw_concept = None
433
- else:
434
- if torch.is_tensor(image):
435
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
436
- else:
437
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
438
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
439
- image, has_nsfw_concept = self.safety_checker(
440
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
441
- )
442
- return image, has_nsfw_concept
443
-
444
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
445
- def prepare_extra_step_kwargs(self, generator, eta):
446
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
447
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
448
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
449
- # and should be between [0, 1]
450
-
451
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
452
- extra_step_kwargs = {}
453
- if accepts_eta:
454
- extra_step_kwargs["eta"] = eta
455
-
456
- # check if the scheduler accepts generator
457
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
458
- if accepts_generator:
459
- extra_step_kwargs["generator"] = generator
460
- return extra_step_kwargs
461
-
462
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
463
- def check_inputs(
464
- self,
465
- prompt,
466
- height,
467
- width,
468
- callback_steps,
469
- negative_prompt=None,
470
- prompt_embeds=None,
471
- negative_prompt_embeds=None,
472
- ip_adapter_image=None,
473
- ip_adapter_image_embeds=None,
474
- callback_on_step_end_tensor_inputs=None,
475
- ):
476
- if height % 8 != 0 or width % 8 != 0:
477
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
478
-
479
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
480
- raise ValueError(
481
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
482
- f" {type(callback_steps)}."
483
- )
484
- if callback_on_step_end_tensor_inputs is not None and not all(
485
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
486
- ):
487
- raise ValueError(
488
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
489
- )
490
-
491
- if prompt is not None and prompt_embeds is not None:
492
- raise ValueError(
493
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
494
- " only forward one of the two."
495
- )
496
- elif prompt is None and prompt_embeds is None:
497
- raise ValueError(
498
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
499
- )
500
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
501
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
502
-
503
- if negative_prompt is not None and negative_prompt_embeds is not None:
504
- raise ValueError(
505
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
506
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
507
- )
508
-
509
- if prompt_embeds is not None and negative_prompt_embeds is not None:
510
- if prompt_embeds.shape != negative_prompt_embeds.shape:
511
- raise ValueError(
512
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
513
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
514
- f" {negative_prompt_embeds.shape}."
515
- )
516
-
517
- if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
518
- raise ValueError(
519
- "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
520
- )
521
-
522
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
523
- def prepare_latents(
524
- self,
525
- batch_size,
526
- num_channels_latents,
527
- height,
528
- width,
529
- dtype,
530
- device,
531
- generator,
532
- latents=None,
533
- ):
534
- shape = (
535
- batch_size,
536
- num_channels_latents,
537
- height // self.vae_scale_factor,
538
- width // self.vae_scale_factor,
539
- )
540
- if isinstance(generator, list) and len(generator) != batch_size:
541
- raise ValueError(
542
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
543
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
544
- )
545
-
546
- if latents is None:
547
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
548
- else:
549
- latents = latents.to(device)
550
-
551
- # scale the initial noise by the standard deviation required by the scheduler
552
- latents = latents * self.scheduler.init_noise_sigma
553
- return latents
554
-
555
- @torch.no_grad()
556
- def __call__(
557
- self,
558
- prompt: Union[str, List[str]],
559
- height: Optional[int] = None,
560
- width: Optional[int] = None,
561
- num_inference_steps: int = 50,
562
- guidance_scale: float = 7.5,
563
- negative_prompt: Optional[Union[str, List[str]]] = None,
564
- num_images_per_prompt: Optional[int] = 1,
565
- eta: float = 0.0,
566
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
567
- latents: Optional[torch.Tensor] = None,
568
- prompt_embeds: Optional[torch.Tensor] = None,
569
- negative_prompt_embeds: Optional[torch.Tensor] = None,
570
- output_type: Optional[str] = "pil",
571
- return_dict: bool = True,
572
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
573
- callback_steps: Optional[int] = 1,
574
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
575
- guidance_rescale: float = 0.0,
576
- ):
577
- r"""
578
- Function invoked when calling the pipeline for generation.
579
-
580
- Args:
581
- prompt (`str` or `List[str]`):
582
- The prompt or prompts to guide the image generation.
583
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
584
- The height in pixels of the generated image.
585
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
586
- The width in pixels of the generated image.
587
- num_inference_steps (`int`, *optional*, defaults to 50):
588
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
589
- expense of slower inference.
590
- guidance_scale (`float`, *optional*, defaults to 7.5):
591
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
592
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
593
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
594
- 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
595
- usually at the expense of lower image quality.
596
- negative_prompt (`str` or `List[str]`, *optional*):
597
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
598
- if `guidance_scale` is less than `1`).
599
- num_images_per_prompt (`int`, *optional*, defaults to 1):
600
- The number of images to generate per prompt.
601
- eta (`float`, *optional*, defaults to 0.0):
602
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
603
- [`schedulers.DDIMScheduler`], will be ignored for others.
604
- generator (`torch.Generator`, *optional*):
605
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
606
- to make generation deterministic.
607
- latents (`torch.Tensor`, *optional*):
608
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
609
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
610
- tensor will be generated by sampling using the supplied random `generator`.
611
- output_type (`str`, *optional*, defaults to `"pil"`):
612
- The output format of the generated image. Choose between
613
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
614
- return_dict (`bool`, *optional*, defaults to `True`):
615
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
616
- plain tuple.
617
- callback (`Callable`, *optional*):
618
- A function that will be called every `callback_steps` steps during inference. The function will be
619
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
620
- callback_steps (`int`, *optional*, defaults to 1):
621
- The frequency at which the `callback` function will be called. If not specified, the callback will be
622
- called at every step.
623
- cross_attention_kwargs (`dict`, *optional*):
624
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
625
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
626
-
627
- The keyword arguments to configure the edit are:
628
- - edit_type (`str`): The edit type to apply. Can be one of `replace`, `refine`, or `reweight`.
629
- - n_cross_replace (`float`): Fraction of the diffusion steps during which cross attention should be replaced.
630
- - n_self_replace (`float`): Fraction of the diffusion steps during which self attention should be replaced.
631
- - local_blend_words (`List[str]`, *optional*, defaults to `None`): Determines which area should be
632
- changed. If None, then the whole image can be changed.
633
- - equalizer_words (`List[str]`, *optional*, defaults to `None`): Required for edit type `reweight`.
634
- Determines which words should be enhanced.
635
- - equalizer_strengths (`List[float]`, *optional*, defaults to `None`): Required for edit type `reweight`.
636
- Determines how much the words in `equalizer_words` should be enhanced.
637
-
638
- guidance_rescale (`float`, *optional*, defaults to 0.0):
639
- Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
640
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
641
- using zero terminal SNR.
642
-
643
- Returns:
644
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
645
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
646
- When returning a tuple, the first element is a list with the generated images, and the second element is a
647
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
648
- (nsfw) content, according to the `safety_checker`.
649
- """
650
-
651
- self.controller = create_controller(
652
- prompt,
653
- cross_attention_kwargs,
654
- num_inference_steps,
655
- tokenizer=self.tokenizer,
656
- device=self.device,
657
- )
658
- self.register_attention_control(self.controller) # add attention controller
659
-
660
- # 0. Default height and width to unet
661
- height = height or self.unet.config.sample_size * self.vae_scale_factor
662
- width = width or self.unet.config.sample_size * self.vae_scale_factor
663
-
664
- # 1. Check inputs. Raise error if not correct
665
- self.check_inputs(prompt, height, width, callback_steps)
666
-
667
- # 2. Define call parameters
668
- if prompt is not None and isinstance(prompt, str):
669
- batch_size = 1
670
- elif prompt is not None and isinstance(prompt, list):
671
- batch_size = len(prompt)
672
- else:
673
- batch_size = prompt_embeds.shape[0]
674
-
675
- device = self._execution_device
676
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
677
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
678
- # corresponds to doing no classifier free guidance.
679
- do_classifier_free_guidance = guidance_scale > 1.0
680
-
681
- # 3. Encode input prompt
682
- text_encoder_lora_scale = (
683
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
684
- )
685
- prompt_embeds = self._encode_prompt(
686
- prompt,
687
- device,
688
- num_images_per_prompt,
689
- do_classifier_free_guidance,
690
- negative_prompt,
691
- prompt_embeds=prompt_embeds,
692
- negative_prompt_embeds=negative_prompt_embeds,
693
- lora_scale=text_encoder_lora_scale,
694
- )
695
-
696
- # 4. Prepare timesteps
697
- self.scheduler.set_timesteps(num_inference_steps, device=device)
698
- timesteps = self.scheduler.timesteps
699
-
700
- # 5. Prepare latent variables
701
- num_channels_latents = self.unet.config.in_channels
702
- latents = self.prepare_latents(
703
- batch_size * num_images_per_prompt,
704
- num_channels_latents,
705
- height,
706
- width,
707
- prompt_embeds.dtype,
708
- device,
709
- generator,
710
- latents,
711
- )
712
-
713
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
714
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
715
-
716
- # 7. Denoising loop
717
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
718
- with self.progress_bar(total=num_inference_steps) as progress_bar:
719
- for i, t in enumerate(timesteps):
720
- # expand the latents if we are doing classifier free guidance
721
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
722
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
723
-
724
- # predict the noise residual
725
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
726
-
727
- # perform guidance
728
- if do_classifier_free_guidance:
729
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
730
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
731
-
732
- if do_classifier_free_guidance and guidance_rescale > 0.0:
733
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
734
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
735
-
736
- # compute the previous noisy sample x_t -> x_t-1
737
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
738
-
739
- # step callback
740
- latents = self.controller.step_callback(latents)
741
-
742
- # call the callback, if provided
743
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
744
- progress_bar.update()
745
- if callback is not None and i % callback_steps == 0:
746
- step_idx = i // getattr(self.scheduler, "order", 1)
747
- callback(step_idx, t, latents)
748
-
749
- # 8. Post-processing
750
- if not output_type == "latent":
751
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
752
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
753
- else:
754
- image = latents
755
- has_nsfw_concept = None
756
-
757
- # 9. Run safety checker
758
- if has_nsfw_concept is None:
759
- do_denormalize = [True] * image.shape[0]
760
- else:
761
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
762
-
763
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
764
-
765
- # Offload last model to CPU
766
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
767
- self.final_offload_hook.offload()
768
-
769
- if not return_dict:
770
- return (image, has_nsfw_concept)
771
-
772
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
773
-
774
- def register_attention_control(self, controller):
775
- attn_procs = {}
776
- cross_att_count = 0
777
- for name in self.unet.attn_processors.keys():
778
- (None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim)
779
- if name.startswith("mid_block"):
780
- self.unet.config.block_out_channels[-1]
781
- place_in_unet = "mid"
782
- elif name.startswith("up_blocks"):
783
- block_id = int(name[len("up_blocks.")])
784
- list(reversed(self.unet.config.block_out_channels))[block_id]
785
- place_in_unet = "up"
786
- elif name.startswith("down_blocks"):
787
- block_id = int(name[len("down_blocks.")])
788
- self.unet.config.block_out_channels[block_id]
789
- place_in_unet = "down"
790
- else:
791
- continue
792
- cross_att_count += 1
793
- attn_procs[name] = P2PCrossAttnProcessor(controller=controller, place_in_unet=place_in_unet)
794
-
795
- self.unet.set_attn_processor(attn_procs)
796
- controller.num_att_layers = cross_att_count
797
-
798
-
799
- class P2PCrossAttnProcessor:
800
- def __init__(self, controller, place_in_unet):
801
- super().__init__()
802
- self.controller = controller
803
- self.place_in_unet = place_in_unet
804
-
805
- def __call__(
806
- self,
807
- attn: Attention,
808
- hidden_states,
809
- encoder_hidden_states=None,
810
- attention_mask=None,
811
- ):
812
- batch_size, sequence_length, _ = hidden_states.shape
813
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
814
-
815
- query = attn.to_q(hidden_states)
816
-
817
- is_cross = encoder_hidden_states is not None
818
- encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
819
- key = attn.to_k(encoder_hidden_states)
820
- value = attn.to_v(encoder_hidden_states)
821
-
822
- query = attn.head_to_batch_dim(query)
823
- key = attn.head_to_batch_dim(key)
824
- value = attn.head_to_batch_dim(value)
825
-
826
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
827
-
828
- # one line change
829
- self.controller(attention_probs, is_cross, self.place_in_unet)
830
-
831
- hidden_states = torch.bmm(attention_probs, value)
832
- hidden_states = attn.batch_to_head_dim(hidden_states)
833
-
834
- # linear proj
835
- hidden_states = attn.to_out[0](hidden_states)
836
- # dropout
837
- hidden_states = attn.to_out[1](hidden_states)
838
-
839
- return hidden_states
840
-
841
-
842
- def create_controller(
843
- prompts: List[str],
844
- cross_attention_kwargs: Dict,
845
- num_inference_steps: int,
846
- tokenizer,
847
- device,
848
- ) -> AttentionControl:
849
- edit_type = cross_attention_kwargs.get("edit_type", None)
850
- local_blend_words = cross_attention_kwargs.get("local_blend_words", None)
851
- equalizer_words = cross_attention_kwargs.get("equalizer_words", None)
852
- equalizer_strengths = cross_attention_kwargs.get("equalizer_strengths", None)
853
- n_cross_replace = cross_attention_kwargs.get("n_cross_replace", 0.4)
854
- n_self_replace = cross_attention_kwargs.get("n_self_replace", 0.4)
855
-
856
- # only replace
857
- if edit_type == "replace" and local_blend_words is None:
858
- return AttentionReplace(
859
- prompts,
860
- num_inference_steps,
861
- n_cross_replace,
862
- n_self_replace,
863
- tokenizer=tokenizer,
864
- device=device,
865
- )
866
-
867
- # replace + localblend
868
- if edit_type == "replace" and local_blend_words is not None:
869
- lb = LocalBlend(prompts, local_blend_words, tokenizer=tokenizer, device=device)
870
- return AttentionReplace(
871
- prompts,
872
- num_inference_steps,
873
- n_cross_replace,
874
- n_self_replace,
875
- lb,
876
- tokenizer=tokenizer,
877
- device=device,
878
- )
879
-
880
- # only refine
881
- if edit_type == "refine" and local_blend_words is None:
882
- return AttentionRefine(
883
- prompts,
884
- num_inference_steps,
885
- n_cross_replace,
886
- n_self_replace,
887
- tokenizer=tokenizer,
888
- device=device,
889
- )
890
-
891
- # refine + localblend
892
- if edit_type == "refine" and local_blend_words is not None:
893
- lb = LocalBlend(prompts, local_blend_words, tokenizer=tokenizer, device=device)
894
- return AttentionRefine(
895
- prompts,
896
- num_inference_steps,
897
- n_cross_replace,
898
- n_self_replace,
899
- lb,
900
- tokenizer=tokenizer,
901
- device=device,
902
- )
903
-
904
- # reweight
905
- if edit_type == "reweight":
906
- assert (
907
- equalizer_words is not None and equalizer_strengths is not None
908
- ), "To use reweight edit, please specify equalizer_words and equalizer_strengths."
909
- assert len(equalizer_words) == len(
910
- equalizer_strengths
911
- ), "equalizer_words and equalizer_strengths must be of same length."
912
- equalizer = get_equalizer(prompts[1], equalizer_words, equalizer_strengths, tokenizer=tokenizer)
913
- return AttentionReweight(
914
- prompts,
915
- num_inference_steps,
916
- n_cross_replace,
917
- n_self_replace,
918
- tokenizer=tokenizer,
919
- device=device,
920
- equalizer=equalizer,
921
- )
922
-
923
- raise ValueError(f"Edit type {edit_type} not recognized. Use one of: replace, refine, reweight.")
924
-
925
-
926
- class AttentionControl(abc.ABC):
927
- def step_callback(self, x_t):
928
- return x_t
929
-
930
- def between_steps(self):
931
- return
932
-
933
- @property
934
- def num_uncond_att_layers(self):
935
- return 0
936
-
937
- @abc.abstractmethod
938
- def forward(self, attn, is_cross: bool, place_in_unet: str):
939
- raise NotImplementedError
940
-
941
- def __call__(self, attn, is_cross: bool, place_in_unet: str):
942
- if self.cur_att_layer >= self.num_uncond_att_layers:
943
- h = attn.shape[0]
944
- attn[h // 2 :] = self.forward(attn[h // 2 :], is_cross, place_in_unet)
945
- self.cur_att_layer += 1
946
- if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
947
- self.cur_att_layer = 0
948
- self.cur_step += 1
949
- self.between_steps()
950
- return attn
951
-
952
- def reset(self):
953
- self.cur_step = 0
954
- self.cur_att_layer = 0
955
-
956
- def __init__(self):
957
- self.cur_step = 0
958
- self.num_att_layers = -1
959
- self.cur_att_layer = 0
960
-
961
-
962
- class EmptyControl(AttentionControl):
963
- def forward(self, attn, is_cross: bool, place_in_unet: str):
964
- return attn
965
-
966
-
967
- class AttentionStore(AttentionControl):
968
- @staticmethod
969
- def get_empty_store():
970
- return {
971
- "down_cross": [],
972
- "mid_cross": [],
973
- "up_cross": [],
974
- "down_self": [],
975
- "mid_self": [],
976
- "up_self": [],
977
- }
978
-
979
- def forward(self, attn, is_cross: bool, place_in_unet: str):
980
- key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
981
- if attn.shape[1] <= 32**2: # avoid memory overhead
982
- self.step_store[key].append(attn)
983
- return attn
984
-
985
- def between_steps(self):
986
- if len(self.attention_store) == 0:
987
- self.attention_store = self.step_store
988
- else:
989
- for key in self.attention_store:
990
- for i in range(len(self.attention_store[key])):
991
- self.attention_store[key][i] += self.step_store[key][i]
992
- self.step_store = self.get_empty_store()
993
-
994
- def get_average_attention(self):
995
- average_attention = {
996
- key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store
997
- }
998
- return average_attention
999
-
1000
- def reset(self):
1001
- super(AttentionStore, self).reset()
1002
- self.step_store = self.get_empty_store()
1003
- self.attention_store = {}
1004
-
1005
- def __init__(self):
1006
- super(AttentionStore, self).__init__()
1007
- self.step_store = self.get_empty_store()
1008
- self.attention_store = {}
1009
-
1010
-
1011
- class LocalBlend:
1012
- def __call__(self, x_t, attention_store):
1013
- k = 1
1014
- maps = attention_store["down_cross"][2:4] + attention_store["up_cross"][:3]
1015
- maps = [item.reshape(self.alpha_layers.shape[0], -1, 1, 16, 16, self.max_num_words) for item in maps]
1016
- maps = torch.cat(maps, dim=1)
1017
- maps = (maps * self.alpha_layers).sum(-1).mean(1)
1018
- mask = F.max_pool2d(maps, (k * 2 + 1, k * 2 + 1), (1, 1), padding=(k, k))
1019
- mask = F.interpolate(mask, size=(x_t.shape[2:]))
1020
- mask = mask / mask.max(2, keepdims=True)[0].max(3, keepdims=True)[0]
1021
- mask = mask.gt(self.threshold)
1022
- mask = (mask[:1] + mask[1:]).float()
1023
- x_t = x_t[:1] + mask * (x_t - x_t[:1])
1024
- return x_t
1025
-
1026
- def __init__(
1027
- self,
1028
- prompts: List[str],
1029
- words: List[List[str]],
1030
- tokenizer,
1031
- device,
1032
- threshold=0.3,
1033
- max_num_words=77,
1034
- ):
1035
- self.max_num_words = max_num_words
1036
-
1037
- alpha_layers = torch.zeros(len(prompts), 1, 1, 1, 1, self.max_num_words)
1038
- for i, (prompt, words_) in enumerate(zip(prompts, words)):
1039
- if isinstance(words_, str):
1040
- words_ = [words_]
1041
- for word in words_:
1042
- ind = get_word_inds(prompt, word, tokenizer)
1043
- alpha_layers[i, :, :, :, :, ind] = 1
1044
- self.alpha_layers = alpha_layers.to(device)
1045
- self.threshold = threshold
1046
-
1047
-
1048
- class AttentionControlEdit(AttentionStore, abc.ABC):
1049
- def step_callback(self, x_t):
1050
- if self.local_blend is not None:
1051
- x_t = self.local_blend(x_t, self.attention_store)
1052
- return x_t
1053
-
1054
- def replace_self_attention(self, attn_base, att_replace):
1055
- if att_replace.shape[2] <= 16**2:
1056
- return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape)
1057
- else:
1058
- return att_replace
1059
-
1060
- @abc.abstractmethod
1061
- def replace_cross_attention(self, attn_base, att_replace):
1062
- raise NotImplementedError
1063
-
1064
- def forward(self, attn, is_cross: bool, place_in_unet: str):
1065
- super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet)
1066
- # FIXME not replace correctly
1067
- if is_cross or (self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]):
1068
- h = attn.shape[0] // (self.batch_size)
1069
- attn = attn.reshape(self.batch_size, h, *attn.shape[1:])
1070
- attn_base, attn_replace = attn[0], attn[1:]
1071
- if is_cross:
1072
- alpha_words = self.cross_replace_alpha[self.cur_step]
1073
- attn_replace_new = (
1074
- self.replace_cross_attention(attn_base, attn_replace) * alpha_words
1075
- + (1 - alpha_words) * attn_replace
1076
- )
1077
- attn[1:] = attn_replace_new
1078
- else:
1079
- attn[1:] = self.replace_self_attention(attn_base, attn_replace)
1080
- attn = attn.reshape(self.batch_size * h, *attn.shape[2:])
1081
- return attn
1082
-
1083
- def __init__(
1084
- self,
1085
- prompts,
1086
- num_steps: int,
1087
- cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],
1088
- self_replace_steps: Union[float, Tuple[float, float]],
1089
- local_blend: Optional[LocalBlend],
1090
- tokenizer,
1091
- device,
1092
- ):
1093
- super(AttentionControlEdit, self).__init__()
1094
- # add tokenizer and device here
1095
-
1096
- self.tokenizer = tokenizer
1097
- self.device = device
1098
-
1099
- self.batch_size = len(prompts)
1100
- self.cross_replace_alpha = get_time_words_attention_alpha(
1101
- prompts, num_steps, cross_replace_steps, self.tokenizer
1102
- ).to(self.device)
1103
- if isinstance(self_replace_steps, float):
1104
- self_replace_steps = 0, self_replace_steps
1105
- self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1])
1106
- self.local_blend = local_blend  # defined outside the controller and passed in
1107
-
1108
-
1109
- class AttentionReplace(AttentionControlEdit):
1110
- def replace_cross_attention(self, attn_base, att_replace):
1111
- return torch.einsum("hpw,bwn->bhpn", attn_base, self.mapper)
1112
-
1113
- def __init__(
1114
- self,
1115
- prompts,
1116
- num_steps: int,
1117
- cross_replace_steps: float,
1118
- self_replace_steps: float,
1119
- local_blend: Optional[LocalBlend] = None,
1120
- tokenizer=None,
1121
- device=None,
1122
- ):
1123
- super(AttentionReplace, self).__init__(
1124
- prompts,
1125
- num_steps,
1126
- cross_replace_steps,
1127
- self_replace_steps,
1128
- local_blend,
1129
- tokenizer,
1130
- device,
1131
- )
1132
- self.mapper = get_replacement_mapper(prompts, self.tokenizer).to(self.device)
1133
-
1134
-
1135
- class AttentionRefine(AttentionControlEdit):
1136
- def replace_cross_attention(self, attn_base, att_replace):
1137
- attn_base_replace = attn_base[:, :, self.mapper].permute(2, 0, 1, 3)
1138
- attn_replace = attn_base_replace * self.alphas + att_replace * (1 - self.alphas)
1139
- return attn_replace
1140
-
1141
- def __init__(
1142
- self,
1143
- prompts,
1144
- num_steps: int,
1145
- cross_replace_steps: float,
1146
- self_replace_steps: float,
1147
- local_blend: Optional[LocalBlend] = None,
1148
- tokenizer=None,
1149
- device=None,
1150
- ):
1151
- super(AttentionRefine, self).__init__(
1152
- prompts,
1153
- num_steps,
1154
- cross_replace_steps,
1155
- self_replace_steps,
1156
- local_blend,
1157
- tokenizer,
1158
- device,
1159
- )
1160
- self.mapper, alphas = get_refinement_mapper(prompts, self.tokenizer)
1161
- self.mapper, alphas = self.mapper.to(self.device), alphas.to(self.device)
1162
- self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1])
1163
-
1164
-
1165
- class AttentionReweight(AttentionControlEdit):
1166
- def replace_cross_attention(self, attn_base, att_replace):
1167
- if self.prev_controller is not None:
1168
- attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace)
1169
- attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :]
1170
- return attn_replace
1171
-
1172
- def __init__(
1173
- self,
1174
- prompts,
1175
- num_steps: int,
1176
- cross_replace_steps: float,
1177
- self_replace_steps: float,
1178
- equalizer,
1179
- local_blend: Optional[LocalBlend] = None,
1180
- controller: Optional[AttentionControlEdit] = None,
1181
- tokenizer=None,
1182
- device=None,
1183
- ):
1184
- super(AttentionReweight, self).__init__(
1185
- prompts,
1186
- num_steps,
1187
- cross_replace_steps,
1188
- self_replace_steps,
1189
- local_blend,
1190
- tokenizer,
1191
- device,
1192
- )
1193
- self.equalizer = equalizer.to(self.device)
1194
- self.prev_controller = controller
1195
-
1196
-
1197
- ### util functions for all Edits
1198
- def update_alpha_time_word(
1199
- alpha,
1200
- bounds: Union[float, Tuple[float, float]],
1201
- prompt_ind: int,
1202
- word_inds: Optional[torch.Tensor] = None,
1203
- ):
1204
- if isinstance(bounds, float):
1205
- bounds = 0, bounds
1206
- start, end = int(bounds[0] * alpha.shape[0]), int(bounds[1] * alpha.shape[0])
1207
- if word_inds is None:
1208
- word_inds = torch.arange(alpha.shape[2])
1209
- alpha[:start, prompt_ind, word_inds] = 0
1210
- alpha[start:end, prompt_ind, word_inds] = 1
1211
- alpha[end:, prompt_ind, word_inds] = 0
1212
- return alpha
1213
-
1214
-
1215
- def get_time_words_attention_alpha(
1216
- prompts,
1217
- num_steps,
1218
- cross_replace_steps: Union[float, Dict[str, Tuple[float, float]]],
1219
- tokenizer,
1220
- max_num_words=77,
1221
- ):
1222
- if not isinstance(cross_replace_steps, dict):
1223
- cross_replace_steps = {"default_": cross_replace_steps}
1224
- if "default_" not in cross_replace_steps:
1225
- cross_replace_steps["default_"] = (0.0, 1.0)
1226
- alpha_time_words = torch.zeros(num_steps + 1, len(prompts) - 1, max_num_words)
1227
- for i in range(len(prompts) - 1):
1228
- alpha_time_words = update_alpha_time_word(alpha_time_words, cross_replace_steps["default_"], i)
1229
- for key, item in cross_replace_steps.items():
1230
- if key != "default_":
1231
- inds = [get_word_inds(prompts[i], key, tokenizer) for i in range(1, len(prompts))]
1232
- for i, ind in enumerate(inds):
1233
- if len(ind) > 0:
1234
- alpha_time_words = update_alpha_time_word(alpha_time_words, item, i, ind)
1235
- alpha_time_words = alpha_time_words.reshape(num_steps + 1, len(prompts) - 1, 1, 1, max_num_words)
1236
- return alpha_time_words
1237
-
1238
-
1239
- ### util functions for LocalBlend and ReplacementEdit
1240
- def get_word_inds(text: str, word_place: int, tokenizer):
1241
- split_text = text.split(" ")
1242
- if isinstance(word_place, str):
1243
- word_place = [i for i, word in enumerate(split_text) if word_place == word]
1244
- elif isinstance(word_place, int):
1245
- word_place = [word_place]
1246
- out = []
1247
- if len(word_place) > 0:
1248
- words_encode = [tokenizer.decode([item]).strip("#") for item in tokenizer.encode(text)][1:-1]
1249
- cur_len, ptr = 0, 0
1250
-
1251
- for i in range(len(words_encode)):
1252
- cur_len += len(words_encode[i])
1253
- if ptr in word_place:
1254
- out.append(i + 1)
1255
- if cur_len >= len(split_text[ptr]):
1256
- ptr += 1
1257
- cur_len = 0
1258
- return np.array(out)
1259
-
1260
-
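A quick, hedged illustration of the indexing convention implemented by `get_word_inds` above. The tokenizer checkpoint and the printed indices are assumptions for illustration; real indices depend on the CLIP tokenizer's sub-word splits, and `get_word_inds` itself is assumed to be importable from this file.

```py
# Sketch only: assumes get_word_inds (defined above) is in scope and that
# "openai/clip-vit-large-patch14" is an acceptable tokenizer checkpoint.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")

# Word positions are reported in CLIP token space, shifted by +1 for the BOS token,
# so the third word of the prompt is expected to land at token index 3 here.
inds = get_word_inds("a silver cat sitting on a chair", "cat", tokenizer)
print(inds)  # expected: array([3]) if every word maps to a single token
```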
1261
- ### util functions for ReplacementEdit
1262
- def get_replacement_mapper_(x: str, y: str, tokenizer, max_len=77):
1263
- words_x = x.split(" ")
1264
- words_y = y.split(" ")
1265
- if len(words_x) != len(words_y):
1266
- raise ValueError(
1267
- f"attention replacement edit can only be applied on prompts with the same length"
1268
- f" but prompt A has {len(words_x)} words and prompt B has {len(words_y)} words."
1269
- )
1270
- inds_replace = [i for i in range(len(words_y)) if words_y[i] != words_x[i]]
1271
- inds_source = [get_word_inds(x, i, tokenizer) for i in inds_replace]
1272
- inds_target = [get_word_inds(y, i, tokenizer) for i in inds_replace]
1273
- mapper = np.zeros((max_len, max_len))
1274
- i = j = 0
1275
- cur_inds = 0
1276
- while i < max_len and j < max_len:
1277
- if cur_inds < len(inds_source) and inds_source[cur_inds][0] == i:
1278
- inds_source_, inds_target_ = inds_source[cur_inds], inds_target[cur_inds]
1279
- if len(inds_source_) == len(inds_target_):
1280
- mapper[inds_source_, inds_target_] = 1
1281
- else:
1282
- ratio = 1 / len(inds_target_)
1283
- for i_t in inds_target_:
1284
- mapper[inds_source_, i_t] = ratio
1285
- cur_inds += 1
1286
- i += len(inds_source_)
1287
- j += len(inds_target_)
1288
- elif cur_inds < len(inds_source):
1289
- mapper[i, j] = 1
1290
- i += 1
1291
- j += 1
1292
- else:
1293
- mapper[j, j] = 1
1294
- i += 1
1295
- j += 1
1296
-
1297
- return torch.from_numpy(mapper).float()
1298
-
1299
-
1300
- def get_replacement_mapper(prompts, tokenizer, max_len=77):
1301
- x_seq = prompts[0]
1302
- mappers = []
1303
- for i in range(1, len(prompts)):
1304
- mapper = get_replacement_mapper_(x_seq, prompts[i], tokenizer, max_len)
1305
- mappers.append(mapper)
1306
- return torch.stack(mappers)
1307
-
1308
-
1309
- ### util functions for ReweightEdit
1310
- def get_equalizer(
1311
- text: str,
1312
- word_select: Union[int, Tuple[int, ...]],
1313
- values: Union[List[float], Tuple[float, ...]],
1314
- tokenizer,
1315
- ):
1316
- if isinstance(word_select, (int, str)):
1317
- word_select = (word_select,)
1318
- equalizer = torch.ones(len(values), 77)
1319
- values = torch.tensor(values, dtype=torch.float32)
1320
- for word in word_select:
1321
- inds = get_word_inds(text, word, tokenizer)
1322
- equalizer[:, inds] = values
1323
- return equalizer
1324
-
1325
-
1326
- ### util functions for RefinementEdit
1327
- class ScoreParams:
1328
- def __init__(self, gap, match, mismatch):
1329
- self.gap = gap
1330
- self.match = match
1331
- self.mismatch = mismatch
1332
-
1333
- def mis_match_char(self, x, y):
1334
- if x != y:
1335
- return self.mismatch
1336
- else:
1337
- return self.match
1338
-
1339
-
1340
- def get_matrix(size_x, size_y, gap):
1341
- matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32)
1342
- matrix[0, 1:] = (np.arange(size_y) + 1) * gap
1343
- matrix[1:, 0] = (np.arange(size_x) + 1) * gap
1344
- return matrix
1345
-
1346
-
1347
- def get_traceback_matrix(size_x, size_y):
1348
- matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32)
1349
- matrix[0, 1:] = 1
1350
- matrix[1:, 0] = 2
1351
- matrix[0, 0] = 4
1352
- return matrix
1353
-
1354
-
1355
- def global_align(x, y, score):
1356
- matrix = get_matrix(len(x), len(y), score.gap)
1357
- trace_back = get_traceback_matrix(len(x), len(y))
1358
- for i in range(1, len(x) + 1):
1359
- for j in range(1, len(y) + 1):
1360
- left = matrix[i, j - 1] + score.gap
1361
- up = matrix[i - 1, j] + score.gap
1362
- diag = matrix[i - 1, j - 1] + score.mis_match_char(x[i - 1], y[j - 1])
1363
- matrix[i, j] = max(left, up, diag)
1364
- if matrix[i, j] == left:
1365
- trace_back[i, j] = 1
1366
- elif matrix[i, j] == up:
1367
- trace_back[i, j] = 2
1368
- else:
1369
- trace_back[i, j] = 3
1370
- return matrix, trace_back
1371
-
1372
-
1373
- def get_aligned_sequences(x, y, trace_back):
1374
- x_seq = []
1375
- y_seq = []
1376
- i = len(x)
1377
- j = len(y)
1378
- mapper_y_to_x = []
1379
- while i > 0 or j > 0:
1380
- if trace_back[i, j] == 3:
1381
- x_seq.append(x[i - 1])
1382
- y_seq.append(y[j - 1])
1383
- i = i - 1
1384
- j = j - 1
1385
- mapper_y_to_x.append((j, i))
1386
- elif trace_back[i][j] == 1:
1387
- x_seq.append("-")
1388
- y_seq.append(y[j - 1])
1389
- j = j - 1
1390
- mapper_y_to_x.append((j, -1))
1391
- elif trace_back[i][j] == 2:
1392
- x_seq.append(x[i - 1])
1393
- y_seq.append("-")
1394
- i = i - 1
1395
- elif trace_back[i][j] == 4:
1396
- break
1397
- mapper_y_to_x.reverse()
1398
- return x_seq, y_seq, torch.tensor(mapper_y_to_x, dtype=torch.int64)
1399
-
1400
-
1401
- def get_mapper(x: str, y: str, tokenizer, max_len=77):
1402
- x_seq = tokenizer.encode(x)
1403
- y_seq = tokenizer.encode(y)
1404
- score = ScoreParams(0, 1, -1)
1405
- matrix, trace_back = global_align(x_seq, y_seq, score)
1406
- mapper_base = get_aligned_sequences(x_seq, y_seq, trace_back)[-1]
1407
- alphas = torch.ones(max_len)
1408
- alphas[: mapper_base.shape[0]] = mapper_base[:, 1].ne(-1).float()
1409
- mapper = torch.zeros(max_len, dtype=torch.int64)
1410
- mapper[: mapper_base.shape[0]] = mapper_base[:, 1]
1411
- mapper[mapper_base.shape[0] :] = len(y_seq) + torch.arange(max_len - len(y_seq))
1412
- return mapper, alphas
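For orientation, a minimal sketch of how the alignment utilities above relate a source prompt to a refined prompt. The tokenizer checkpoint is illustrative and `get_mapper` is assumed to be in scope; no outputs beyond the documented shapes are claimed.

```py
# Sketch only: ScoreParams(0, 1, -1) above means gap=0, match=+1, mismatch=-1.
# get_mapper globally aligns the two token sequences and returns, for every
# target token, the index of the source token it maps to, plus an alpha mask
# marking target tokens that have a counterpart in the source prompt.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
mapper, alphas = get_mapper("a photo of a cat", "a photo of a fluffy cat", tokenizer)
# mapper.shape == (77,) and alphas.shape == (77,), matching max_len above
```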
1413
-
1414
-
1415
- def get_refinement_mapper(prompts, tokenizer, max_len=77):
1416
- x_seq = prompts[0]
1417
- mappers, alphas = [], []
1418
- for i in range(1, len(prompts)):
1419
- mapper, alpha = get_mapper(x_seq, prompts[i], tokenizer, max_len)
1420
- mappers.append(mapper)
1421
- alphas.append(alpha)
1422
- return torch.stack(mappers), torch.stack(alphas)
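Before the next file, a hedged end-to-end sketch of how the Prompt-to-Prompt pipeline deleted above is typically driven through `cross_attention_kwargs`. The base checkpoint and the `custom_pipeline` name are assumptions for illustration and are not taken from this commit.

```py
# Sketch only: loads the community pipeline by file name and runs a "replace" edit.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",            # assumed base checkpoint
    custom_pipeline="pipeline_prompt2prompt",   # assumed community pipeline name
    torch_dtype=torch.float16,
).to("cuda")

# The first prompt is the source; the following prompts are the edited variants
# whose cross-attention maps are controlled by the controller built above.
prompts = [
    "a photo of a cat riding a bicycle",
    "a photo of a dog riding a bicycle",
]

images = pipe(
    prompt=prompts,
    num_inference_steps=50,
    cross_attention_kwargs={
        "edit_type": "replace",   # or "refine" / "reweight", per create_controller
        "n_cross_replace": 0.4,   # fraction of steps with replaced cross attention
        "n_self_replace": 0.4,    # fraction of steps with replaced self attention
    },
).images
```

For `reweight`, `equalizer_words` and `equalizer_strengths` must also be supplied, as the assertions in `create_controller` enforce.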
pipeline_sdxl_style_aligned.py DELETED
@@ -1,1916 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- #
15
- # Based on [Style Aligned Image Generation via Shared Attention](https://arxiv.org/abs/2312.02133).
16
- # Authors: Amir Hertz, Andrey Voynov, Shlomi Fruchter, Daniel Cohen-Or
17
- # Project Page: https://style-aligned-gen.github.io/
18
- # Code: https://github.com/google/style-aligned
19
- #
20
- # Adapted to Diffusers by [Aryan V S](https://github.com/a-r-r-o-w/).
21
-
22
- import inspect
23
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
24
-
25
- import torch
26
- import torch.nn as nn
27
- import torch.nn.functional as F
28
- from PIL import Image
29
- from transformers import (
30
- CLIPImageProcessor,
31
- CLIPTextModel,
32
- CLIPTextModelWithProjection,
33
- CLIPTokenizer,
34
- CLIPVisionModelWithProjection,
35
- )
36
-
37
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
38
- from diffusers.loaders import (
39
- FromSingleFileMixin,
40
- IPAdapterMixin,
41
- StableDiffusionXLLoraLoaderMixin,
42
- TextualInversionLoaderMixin,
43
- )
44
- from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
45
- from diffusers.models.attention_processor import (
46
- Attention,
47
- AttnProcessor2_0,
48
- FusedAttnProcessor2_0,
49
- LoRAAttnProcessor2_0,
50
- LoRAXFormersAttnProcessor,
51
- XFormersAttnProcessor,
52
- )
53
- from diffusers.models.lora import adjust_lora_scale_text_encoder
54
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
55
- from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
56
- from diffusers.schedulers import KarrasDiffusionSchedulers
57
- from diffusers.utils import (
58
- USE_PEFT_BACKEND,
59
- deprecate,
60
- is_invisible_watermark_available,
61
- is_torch_xla_available,
62
- logging,
63
- replace_example_docstring,
64
- scale_lora_layers,
65
- unscale_lora_layers,
66
- )
67
- from diffusers.utils.torch_utils import randn_tensor
68
-
69
-
70
- if is_invisible_watermark_available():
71
- from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
72
-
73
- if is_torch_xla_available():
74
- import torch_xla.core.xla_model as xm
75
-
76
- XLA_AVAILABLE = True
77
- else:
78
- XLA_AVAILABLE = False
79
-
80
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
81
-
82
- EXAMPLE_DOC_STRING = """
83
- Examples:
84
- ```py
85
- >>> from typing import List
86
-
87
- >>> import torch
88
- >>> from diffusers.pipelines.pipeline_utils import DiffusionPipeline
89
- >>> from PIL import Image
90
-
91
- >>> model_id = "a-r-r-o-w/dreamshaper-xl-turbo"
92
- >>> pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16", custom_pipeline="pipeline_sdxl_style_aligned")
93
- >>> pipe = pipe.to("cuda")
94
-
95
- >>> # Enable memory saving techniques
96
- >>> pipe.enable_vae_slicing()
97
- >>> pipe.enable_vae_tiling()
98
-
99
- >>> prompt = [
100
- ... "a toy train. macro photo. 3d game asset",
101
- ... "a toy airplane. macro photo. 3d game asset",
102
- ... "a toy bicycle. macro photo. 3d game asset",
103
- ... "a toy car. macro photo. 3d game asset",
104
- ... ]
105
- >>> negative_prompt = "low quality, worst quality, "
106
-
107
- >>> # Enable StyleAligned
108
- >>> pipe.enable_style_aligned(
109
- ... share_group_norm=False,
110
- ... share_layer_norm=False,
111
- ... share_attention=True,
112
- ... adain_queries=True,
113
- ... adain_keys=True,
114
- ... adain_values=False,
115
- ... full_attention_share=False,
116
- ... shared_score_scale=1.0,
117
- ... shared_score_shift=0.0,
118
- ... only_self_level=0.0,
119
- ... )
120
-
121
- >>> # Run inference
122
- >>> images = pipe(
123
- ... prompt=prompt,
124
- ... negative_prompt=negative_prompt,
125
- ... guidance_scale=2,
126
- ... height=1024,
127
- ... width=1024,
128
- ... num_inference_steps=10,
129
- ... generator=torch.Generator().manual_seed(42),
130
- ... ).images
131
-
132
- >>> # Disable StyleAligned if you do not wish to use it anymore
133
- >>> pipe.disable_style_aligned()
134
- ```
135
- """
136
-
137
-
138
- def expand_first(feat: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
139
- b = feat.shape[0]
140
- feat_style = torch.stack((feat[0], feat[b // 2])).unsqueeze(1)
141
- if scale == 1:
142
- feat_style = feat_style.expand(2, b // 2, *feat.shape[1:])
143
- else:
144
- feat_style = feat_style.repeat(1, b // 2, 1, 1, 1)
145
- feat_style = torch.cat([feat_style[:, :1], scale * feat_style[:, 1:]], dim=1)
146
- return feat_style.reshape(*feat.shape)
147
-
148
-
149
- def concat_first(feat: torch.Tensor, dim: int = 2, scale: float = 1.0) -> torch.Tensor:
150
- feat_style = expand_first(feat, scale=scale)
151
- return torch.cat((feat, feat_style), dim=dim)
152
-
153
-
154
- def calc_mean_std(feat: torch.Tensor, eps: float = 1e-5) -> Tuple[torch.Tensor, torch.Tensor]:
155
- feat_std = (feat.var(dim=-2, keepdims=True) + eps).sqrt()
156
- feat_mean = feat.mean(dim=-2, keepdims=True)
157
- return feat_mean, feat_std
158
-
159
-
160
- def adain(feat: torch.Tensor) -> torch.Tensor:
161
- feat_mean, feat_std = calc_mean_std(feat)
162
- feat_style_mean = expand_first(feat_mean)
163
- feat_style_std = expand_first(feat_std)
164
- feat = (feat - feat_mean) / feat_std
165
- feat = feat * feat_style_std + feat_style_mean
166
- return feat
167
-
168
-
169
- def get_switch_vec(total_num_layers, level):
170
- if level == 0:
171
- return torch.zeros(total_num_layers, dtype=torch.bool)
172
- if level == 1:
173
- return torch.ones(total_num_layers, dtype=torch.bool)
174
- to_flip = level > 0.5
175
- if to_flip:
176
- level = 1 - level
177
- num_switch = int(level * total_num_layers)
178
- vec = torch.arange(total_num_layers)
179
- vec = vec % (total_num_layers // num_switch)
180
- vec = vec == 0
181
- if to_flip:
182
- vec = ~vec
183
- return vec
184
-
185
-
186
- class SharedAttentionProcessor(AttnProcessor2_0):
187
- def __init__(
188
- self,
189
- share_attention: bool = True,
190
- adain_queries: bool = True,
191
- adain_keys: bool = True,
192
- adain_values: bool = False,
193
- full_attention_share: bool = False,
194
- shared_score_scale: float = 1.0,
195
- shared_score_shift: float = 0.0,
196
- ):
197
- r"""Shared Attention Processor as proposed in the StyleAligned paper."""
198
- super().__init__()
199
- self.share_attention = share_attention
200
- self.adain_queries = adain_queries
201
- self.adain_keys = adain_keys
202
- self.adain_values = adain_values
203
- self.full_attention_share = full_attention_share
204
- self.shared_score_scale = shared_score_scale
205
- self.shared_score_shift = shared_score_shift
206
-
207
- def shifted_scaled_dot_product_attention(
208
- self, attn: Attention, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
209
- ) -> torch.Tensor:
210
- logits = torch.einsum("bhqd,bhkd->bhqk", query, key) * attn.scale
211
- logits[:, :, :, query.shape[2] :] += self.shared_score_shift
212
- probs = logits.softmax(-1)
213
- return torch.einsum("bhqk,bhkd->bhqd", probs, value)
214
-
215
- def shared_call(
216
- self,
217
- attn: Attention,
218
- hidden_states: torch.Tensor,
219
- encoder_hidden_states: Optional[torch.Tensor] = None,
220
- attention_mask: Optional[torch.Tensor] = None,
221
- **kwargs,
222
- ):
223
- residual = hidden_states
224
- input_ndim = hidden_states.ndim
225
- if input_ndim == 4:
226
- batch_size, channel, height, width = hidden_states.shape
227
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
228
- batch_size, sequence_length, _ = (
229
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
230
- )
231
-
232
- if attention_mask is not None:
233
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
234
- # scaled_dot_product_attention expects attention_mask shape to be
235
- # (batch, heads, source_length, target_length)
236
- attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
237
-
238
- if attn.group_norm is not None:
239
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
240
-
241
- query = attn.to_q(hidden_states)
242
- key = attn.to_k(hidden_states)
243
- value = attn.to_v(hidden_states)
244
- inner_dim = key.shape[-1]
245
- head_dim = inner_dim // attn.heads
246
-
247
- query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
248
- key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
249
- value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
250
-
251
- if self.adain_queries:
252
- query = adain(query)
253
- if self.adain_keys:
254
- key = adain(key)
255
- if self.adain_values:
256
- value = adain(value)
257
- if self.share_attention:
258
- key = concat_first(key, -2, scale=self.shared_score_scale)
259
- value = concat_first(value, -2)
260
- if self.shared_score_shift != 0:
261
- hidden_states = self.shifted_scaled_dot_product_attention(attn, query, key, value)
262
- else:
263
- hidden_states = F.scaled_dot_product_attention(
264
- query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
265
- )
266
- else:
267
- hidden_states = F.scaled_dot_product_attention(
268
- query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
269
- )
270
-
271
- hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
272
- hidden_states = hidden_states.to(query.dtype)
273
-
274
- # linear proj
275
- hidden_states = attn.to_out[0](hidden_states)
276
- # dropout
277
- hidden_states = attn.to_out[1](hidden_states)
278
-
279
- if input_ndim == 4:
280
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
281
-
282
- if attn.residual_connection:
283
- hidden_states = hidden_states + residual
284
-
285
- hidden_states = hidden_states / attn.rescale_output_factor
286
- return hidden_states
287
-
288
- def __call__(
289
- self,
290
- attn: Attention,
291
- hidden_states: torch.Tensor,
292
- encoder_hidden_states: Optional[torch.Tensor] = None,
293
- attention_mask: Optional[torch.Tensor] = None,
294
- **kwargs,
295
- ):
296
- if self.full_attention_share:
297
- b, n, d = hidden_states.shape
298
- k = 2
299
- hidden_states = hidden_states.view(k, b, n, d).permute(0, 1, 3, 2).contiguous().view(-1, n, d)
300
- # hidden_states = einops.rearrange(hidden_states, "(k b) n d -> k (b n) d", k=2)
301
- hidden_states = super().__call__(
302
- attn,
303
- hidden_states,
304
- encoder_hidden_states=encoder_hidden_states,
305
- attention_mask=attention_mask,
306
- **kwargs,
307
- )
308
- hidden_states = hidden_states.view(k, b, n, d).permute(0, 1, 3, 2).contiguous().view(-1, n, d)
309
- # hidden_states = einops.rearrange(hidden_states, "k (b n) d -> (k b) n d", n=n)
310
- else:
311
- hidden_states = self.shared_call(attn, hidden_states, hidden_states, attention_mask, **kwargs)
312
-
313
- return hidden_states
314
-
315
-
316
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
317
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
318
- """
319
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
320
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
321
- """
322
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
323
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
324
- # rescale the results from guidance (fixes overexposure)
325
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
326
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
327
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
328
- return noise_cfg
329
-
330
-
331
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
332
- def retrieve_timesteps(
333
- scheduler,
334
- num_inference_steps: Optional[int] = None,
335
- device: Optional[Union[str, torch.device]] = None,
336
- timesteps: Optional[List[int]] = None,
337
- **kwargs,
338
- ):
339
- """
340
- Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
341
- custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
342
-
343
- Args:
344
- scheduler (`SchedulerMixin`):
345
- The scheduler to get timesteps from.
346
- num_inference_steps (`int`):
347
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
348
- `timesteps` must be `None`.
349
- device (`str` or `torch.device`, *optional*):
350
- The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
351
- timesteps (`List[int]`, *optional*):
352
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
353
- timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
354
- must be `None`.
355
-
356
- Returns:
357
- `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
358
- second element is the number of inference steps.
359
- """
360
- if timesteps is not None:
361
- accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
362
- if not accepts_timesteps:
363
- raise ValueError(
364
- f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
365
- f" timestep schedules. Please check whether you are using the correct scheduler."
366
- )
367
- scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
368
- timesteps = scheduler.timesteps
369
- num_inference_steps = len(timesteps)
370
- else:
371
- scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
372
- timesteps = scheduler.timesteps
373
- return timesteps, num_inference_steps
374
-
375
-
376
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
377
- def retrieve_latents(
378
- encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
379
- ):
380
- if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
381
- return encoder_output.latent_dist.sample(generator)
382
- elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
383
- return encoder_output.latent_dist.mode()
384
- elif hasattr(encoder_output, "latents"):
385
- return encoder_output.latents
386
- else:
387
- raise AttributeError("Could not access latents of provided encoder_output")
388
-
389
-
390
- class StyleAlignedSDXLPipeline(
391
- DiffusionPipeline,
392
- StableDiffusionMixin,
393
- FromSingleFileMixin,
394
- StableDiffusionXLLoraLoaderMixin,
395
- TextualInversionLoaderMixin,
396
- IPAdapterMixin,
397
- ):
398
- r"""
399
- Pipeline for text-to-image generation using Stable Diffusion XL.
400
-
401
- This pipeline also adds experimental support for [StyleAligned](https://arxiv.org/abs/2312.02133). It can
402
- be enabled/disabled using `.enable_style_aligned()` or `.disable_style_aligned()` respectively.
403
-
404
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
405
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
406
-
407
- The pipeline also inherits the following loading methods:
408
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
409
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
410
- - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
411
- - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
412
- - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
413
-
414
- Args:
415
- vae ([`AutoencoderKL`]):
416
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
417
- text_encoder ([`CLIPTextModel`]):
418
- Frozen text-encoder. Stable Diffusion XL uses the text portion of
419
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
420
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
421
- text_encoder_2 ([`CLIPTextModelWithProjection`]):
422
- Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
423
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
424
- specifically the
425
- [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
426
- variant.
427
- tokenizer (`CLIPTokenizer`):
428
- Tokenizer of class
429
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
430
- tokenizer_2 (`CLIPTokenizer`):
431
- Second Tokenizer of class
432
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
433
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
434
- scheduler ([`SchedulerMixin`]):
435
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
436
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
437
- force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
438
- Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
439
- `stabilityai/stable-diffusion-xl-base-1.0`.
440
- add_watermarker (`bool`, *optional*):
441
- Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
442
- watermark output images. If not defined, it will default to True if the package is installed, otherwise no
443
- watermarker will be used.
444
- """
445
-
446
- model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
447
- _optional_components = [
448
- "tokenizer",
449
- "tokenizer_2",
450
- "text_encoder",
451
- "text_encoder_2",
452
- "image_encoder",
453
- "feature_extractor",
454
- ]
455
- _callback_tensor_inputs = [
456
- "latents",
457
- "prompt_embeds",
458
- "negative_prompt_embeds",
459
- "add_text_embeds",
460
- "add_time_ids",
461
- "negative_pooled_prompt_embeds",
462
- "negative_add_time_ids",
463
- ]
464
-
465
- def __init__(
466
- self,
467
- vae: AutoencoderKL,
468
- text_encoder: CLIPTextModel,
469
- text_encoder_2: CLIPTextModelWithProjection,
470
- tokenizer: CLIPTokenizer,
471
- tokenizer_2: CLIPTokenizer,
472
- unet: UNet2DConditionModel,
473
- scheduler: KarrasDiffusionSchedulers,
474
- image_encoder: CLIPVisionModelWithProjection = None,
475
- feature_extractor: CLIPImageProcessor = None,
476
- force_zeros_for_empty_prompt: bool = True,
477
- add_watermarker: Optional[bool] = None,
478
- ):
479
- super().__init__()
480
-
481
- self.register_modules(
482
- vae=vae,
483
- text_encoder=text_encoder,
484
- text_encoder_2=text_encoder_2,
485
- tokenizer=tokenizer,
486
- tokenizer_2=tokenizer_2,
487
- unet=unet,
488
- scheduler=scheduler,
489
- image_encoder=image_encoder,
490
- feature_extractor=feature_extractor,
491
- )
492
- self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
493
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
494
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
495
- self.mask_processor = VaeImageProcessor(
496
- vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
497
- )
498
-
499
- self.default_sample_size = self.unet.config.sample_size
500
-
501
- add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
502
-
503
- if add_watermarker:
504
- self.watermark = StableDiffusionXLWatermarker()
505
- else:
506
- self.watermark = None
507
-
508
- def encode_prompt(
509
- self,
510
- prompt: str,
511
- prompt_2: Optional[str] = None,
512
- device: Optional[torch.device] = None,
513
- num_images_per_prompt: int = 1,
514
- do_classifier_free_guidance: bool = True,
515
- negative_prompt: Optional[str] = None,
516
- negative_prompt_2: Optional[str] = None,
517
- prompt_embeds: Optional[torch.Tensor] = None,
518
- negative_prompt_embeds: Optional[torch.Tensor] = None,
519
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
520
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
521
- lora_scale: Optional[float] = None,
522
- clip_skip: Optional[int] = None,
523
- ):
524
- r"""
525
- Encodes the prompt into text encoder hidden states.
526
-
527
- Args:
528
- prompt (`str` or `List[str]`, *optional*):
529
- prompt to be encoded
530
- prompt_2 (`str` or `List[str]`, *optional*):
531
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
532
- used in both text-encoders
533
- device: (`torch.device`):
534
- torch device
535
- num_images_per_prompt (`int`):
536
- number of images that should be generated per prompt
537
- do_classifier_free_guidance (`bool`):
538
- whether to use classifier free guidance or not
539
- negative_prompt (`str` or `List[str]`, *optional*):
540
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
541
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
542
- less than `1`).
543
- negative_prompt_2 (`str` or `List[str]`, *optional*):
544
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
545
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
546
- prompt_embeds (`torch.Tensor`, *optional*):
547
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
548
- provided, text embeddings will be generated from `prompt` input argument.
549
- negative_prompt_embeds (`torch.Tensor`, *optional*):
550
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
551
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
552
- argument.
553
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
554
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
555
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
556
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
557
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
558
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
559
- input argument.
560
- lora_scale (`float`, *optional*):
561
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
562
- clip_skip (`int`, *optional*):
563
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
564
- the output of the pre-final layer will be used for computing the prompt embeddings.
565
- """
566
- device = device or self._execution_device
567
-
568
- # set lora scale so that monkey patched LoRA
569
- # function of text encoder can correctly access it
570
- if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
571
- self._lora_scale = lora_scale
572
-
573
- # dynamically adjust the LoRA scale
574
- if self.text_encoder is not None:
575
- if not USE_PEFT_BACKEND:
576
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
577
- else:
578
- scale_lora_layers(self.text_encoder, lora_scale)
579
-
580
- if self.text_encoder_2 is not None:
581
- if not USE_PEFT_BACKEND:
582
- adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
583
- else:
584
- scale_lora_layers(self.text_encoder_2, lora_scale)
585
-
586
- prompt = [prompt] if isinstance(prompt, str) else prompt
587
-
588
- if prompt is not None:
589
- batch_size = len(prompt)
590
- else:
591
- batch_size = prompt_embeds.shape[0]
592
-
593
- # Define tokenizers and text encoders
594
- tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
595
- text_encoders = (
596
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
597
- )
598
-
599
- if prompt_embeds is None:
600
- prompt_2 = prompt_2 or prompt
601
- prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
602
-
603
- # textual inversion: process multi-vector tokens if necessary
604
- prompt_embeds_list = []
605
- prompts = [prompt, prompt_2]
606
- for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
607
- if isinstance(self, TextualInversionLoaderMixin):
608
- prompt = self.maybe_convert_prompt(prompt, tokenizer)
609
-
610
- text_inputs = tokenizer(
611
- prompt,
612
- padding="max_length",
613
- max_length=tokenizer.model_max_length,
614
- truncation=True,
615
- return_tensors="pt",
616
- )
617
-
618
- text_input_ids = text_inputs.input_ids
619
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
620
-
621
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
622
- text_input_ids, untruncated_ids
623
- ):
624
- removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
625
- logger.warning(
626
- "The following part of your input was truncated because CLIP can only handle sequences up to"
627
- f" {tokenizer.model_max_length} tokens: {removed_text}"
628
- )
629
-
630
- prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
631
-
632
- # We are only ALWAYS interested in the pooled output of the final text encoder
633
- pooled_prompt_embeds = prompt_embeds[0]
634
- if clip_skip is None:
635
- prompt_embeds = prompt_embeds.hidden_states[-2]
636
- else:
637
- # "2" because SDXL always indexes from the penultimate layer.
638
- prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
639
-
640
- prompt_embeds_list.append(prompt_embeds)
641
-
642
- prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
643
-
644
- # get unconditional embeddings for classifier free guidance
645
- zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
646
- if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
647
- negative_prompt_embeds = torch.zeros_like(prompt_embeds)
648
- negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
649
- elif do_classifier_free_guidance and negative_prompt_embeds is None:
650
- negative_prompt = negative_prompt or ""
651
- negative_prompt_2 = negative_prompt_2 or negative_prompt
652
-
653
- # normalize str to list
654
- negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
655
- negative_prompt_2 = (
656
- batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
657
- )
658
-
659
- uncond_tokens: List[str]
660
- if prompt is not None and type(prompt) is not type(negative_prompt):
661
- raise TypeError(
662
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
663
- f" {type(prompt)}."
664
- )
665
- elif batch_size != len(negative_prompt):
666
- raise ValueError(
667
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
668
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
669
- " the batch size of `prompt`."
670
- )
671
- else:
672
- uncond_tokens = [negative_prompt, negative_prompt_2]
673
-
674
- negative_prompt_embeds_list = []
675
- for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
676
- if isinstance(self, TextualInversionLoaderMixin):
677
- negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
678
-
679
- max_length = prompt_embeds.shape[1]
680
- uncond_input = tokenizer(
681
- negative_prompt,
682
- padding="max_length",
683
- max_length=max_length,
684
- truncation=True,
685
- return_tensors="pt",
686
- )
687
-
688
- negative_prompt_embeds = text_encoder(
689
- uncond_input.input_ids.to(device),
690
- output_hidden_states=True,
691
- )
692
- # We are only ALWAYS interested in the pooled output of the final text encoder
693
- negative_pooled_prompt_embeds = negative_prompt_embeds[0]
694
- negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
695
-
696
- negative_prompt_embeds_list.append(negative_prompt_embeds)
697
-
698
- negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
699
-
700
- if self.text_encoder_2 is not None:
701
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
702
- else:
703
- prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
704
-
705
- bs_embed, seq_len, _ = prompt_embeds.shape
706
- # duplicate text embeddings for each generation per prompt, using mps friendly method
707
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
708
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
709
-
710
- if do_classifier_free_guidance:
711
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
712
- seq_len = negative_prompt_embeds.shape[1]
713
-
714
- if self.text_encoder_2 is not None:
715
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
716
- else:
717
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
718
-
719
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
720
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
721
-
722
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
723
- bs_embed * num_images_per_prompt, -1
724
- )
725
- if do_classifier_free_guidance:
726
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
727
- bs_embed * num_images_per_prompt, -1
728
- )
729
-
730
- if self.text_encoder is not None:
731
- if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
732
- # Retrieve the original scale by scaling back the LoRA layers
733
- unscale_lora_layers(self.text_encoder, lora_scale)
734
-
735
- if self.text_encoder_2 is not None:
736
- if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
737
- # Retrieve the original scale by scaling back the LoRA layers
738
- unscale_lora_layers(self.text_encoder_2, lora_scale)
739
-
740
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
741
-
742
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
743
- def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
744
- dtype = next(self.image_encoder.parameters()).dtype
745
-
746
- if not isinstance(image, torch.Tensor):
747
- image = self.feature_extractor(image, return_tensors="pt").pixel_values
748
-
749
- image = image.to(device=device, dtype=dtype)
750
- if output_hidden_states:
751
- image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
752
- image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
753
- uncond_image_enc_hidden_states = self.image_encoder(
754
- torch.zeros_like(image), output_hidden_states=True
755
- ).hidden_states[-2]
756
- uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
757
- num_images_per_prompt, dim=0
758
- )
759
- return image_enc_hidden_states, uncond_image_enc_hidden_states
760
- else:
761
- image_embeds = self.image_encoder(image).image_embeds
762
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
763
- uncond_image_embeds = torch.zeros_like(image_embeds)
764
-
765
- return image_embeds, uncond_image_embeds
766
-
767
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
768
- def prepare_extra_step_kwargs(self, generator, eta):
769
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
770
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
771
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
772
- # and should be between [0, 1]
773
-
774
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
775
- extra_step_kwargs = {}
776
- if accepts_eta:
777
- extra_step_kwargs["eta"] = eta
778
-
779
- # check if the scheduler accepts generator
780
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
781
- if accepts_generator:
782
- extra_step_kwargs["generator"] = generator
783
- return extra_step_kwargs
784
-
785
- def check_inputs(
786
- self,
787
- prompt,
788
- prompt_2,
789
- height,
790
- width,
791
- callback_steps,
792
- negative_prompt=None,
793
- negative_prompt_2=None,
794
- prompt_embeds=None,
795
- negative_prompt_embeds=None,
796
- pooled_prompt_embeds=None,
797
- negative_pooled_prompt_embeds=None,
798
- callback_on_step_end_tensor_inputs=None,
799
- ):
800
- if height % 8 != 0 or width % 8 != 0:
801
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
802
-
803
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
804
- raise ValueError(
805
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
806
- f" {type(callback_steps)}."
807
- )
808
-
809
- if callback_on_step_end_tensor_inputs is not None and not all(
810
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
811
- ):
812
- raise ValueError(
813
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
814
- )
815
-
816
- if prompt is not None and prompt_embeds is not None:
817
- raise ValueError(
818
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
819
- " only forward one of the two."
820
- )
821
- elif prompt_2 is not None and prompt_embeds is not None:
822
- raise ValueError(
823
- f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
824
- " only forward one of the two."
825
- )
826
- elif prompt is None and prompt_embeds is None:
827
- raise ValueError(
828
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
829
- )
830
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
831
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
832
- elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
833
- raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
834
-
835
- if negative_prompt is not None and negative_prompt_embeds is not None:
836
- raise ValueError(
837
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
838
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
839
- )
840
- elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
841
- raise ValueError(
842
- f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
843
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
844
- )
845
-
846
- if prompt_embeds is not None and negative_prompt_embeds is not None:
847
- if prompt_embeds.shape != negative_prompt_embeds.shape:
848
- raise ValueError(
849
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
850
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
851
- f" {negative_prompt_embeds.shape}."
852
- )
853
-
854
- if prompt_embeds is not None and pooled_prompt_embeds is None:
855
- raise ValueError(
856
- "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
857
- )
858
-
859
- if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
860
- raise ValueError(
861
- "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
862
- )
863
-
864
- def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
865
- # get the original timestep using init_timestep
866
- if denoising_start is None:
867
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
868
- t_start = max(num_inference_steps - init_timestep, 0)
869
- else:
870
- t_start = 0
871
-
872
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
873
-
874
- # Strength is irrelevant if we directly request a timestep to start at;
875
- # that is, strength is determined by the denoising_start instead.
876
- if denoising_start is not None:
877
- discrete_timestep_cutoff = int(
878
- round(
879
- self.scheduler.config.num_train_timesteps
880
- - (denoising_start * self.scheduler.config.num_train_timesteps)
881
- )
882
- )
883
-
884
- num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
885
- if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
886
- # if the scheduler is a 2nd order scheduler we might have to do +1
887
- # because `num_inference_steps` might be even given that every timestep
888
- # (except the highest one) is duplicated. If `num_inference_steps` is even it would
889
- # mean that we cut the timesteps in the middle of the denoising step
890
- # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
891
- # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
892
- num_inference_steps = num_inference_steps + 1
893
-
894
- # because t_n+1 >= t_n, we slice the timesteps starting from the end
895
- timesteps = timesteps[-num_inference_steps:]
896
- return timesteps, num_inference_steps
897
-
898
- return timesteps, num_inference_steps - t_start
899
-
900
- def prepare_latents(
901
- self,
902
- image,
903
- mask,
904
- width,
905
- height,
906
- num_channels_latents,
907
- timestep,
908
- batch_size,
909
- num_images_per_prompt,
910
- dtype,
911
- device,
912
- generator=None,
913
- add_noise=True,
914
- latents=None,
915
- is_strength_max=True,
916
- return_noise=False,
917
- return_image_latents=False,
918
- ):
919
- batch_size *= num_images_per_prompt
920
-
921
- if image is None:
922
- shape = (
923
- batch_size,
924
- num_channels_latents,
925
- int(height) // self.vae_scale_factor,
926
- int(width) // self.vae_scale_factor,
927
- )
928
- if isinstance(generator, list) and len(generator) != batch_size:
929
- raise ValueError(
930
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
931
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
932
- )
933
-
934
- if latents is None:
935
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
936
- else:
937
- latents = latents.to(device)
938
-
939
- # scale the initial noise by the standard deviation required by the scheduler
940
- latents = latents * self.scheduler.init_noise_sigma
941
- return latents
942
-
943
- elif mask is None:
944
- if not isinstance(image, (torch.Tensor, Image.Image, list)):
945
- raise ValueError(
946
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
947
- )
948
-
949
- # Offload text encoder if `enable_model_cpu_offload` was enabled
950
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
951
- self.text_encoder_2.to("cpu")
952
- torch.cuda.empty_cache()
953
-
954
- image = image.to(device=device, dtype=dtype)
955
-
956
- if image.shape[1] == 4:
957
- init_latents = image
958
-
959
- else:
960
- # make sure the VAE is in float32 mode, as it overflows in float16
961
- if self.vae.config.force_upcast:
962
- image = image.float()
963
- self.vae.to(dtype=torch.float32)
964
-
965
- if isinstance(generator, list) and len(generator) != batch_size:
966
- raise ValueError(
967
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
968
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
969
- )
970
-
971
- elif isinstance(generator, list):
972
- init_latents = [
973
- retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
974
- for i in range(batch_size)
975
- ]
976
- init_latents = torch.cat(init_latents, dim=0)
977
- else:
978
- init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
979
-
980
- if self.vae.config.force_upcast:
981
- self.vae.to(dtype)
982
-
983
- init_latents = init_latents.to(dtype)
984
- init_latents = self.vae.config.scaling_factor * init_latents
985
-
986
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
987
- # expand init_latents for batch_size
988
- additional_image_per_prompt = batch_size // init_latents.shape[0]
989
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
990
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
991
- raise ValueError(
992
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
993
- )
994
- else:
995
- init_latents = torch.cat([init_latents], dim=0)
996
-
997
- if add_noise:
998
- shape = init_latents.shape
999
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
1000
- # get latents
1001
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
1002
-
1003
- latents = init_latents
1004
- return latents
1005
-
1006
- else:
1007
- shape = (
1008
- batch_size,
1009
- num_channels_latents,
1010
- int(height) // self.vae_scale_factor,
1011
- int(width) // self.vae_scale_factor,
1012
- )
1013
- if isinstance(generator, list) and len(generator) != batch_size:
1014
- raise ValueError(
1015
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
1016
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
1017
- )
1018
-
1019
- if (image is None or timestep is None) and not is_strength_max:
1020
- raise ValueError(
1021
- "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
1022
- "However, either the image or the noise timestep has not been provided."
1023
- )
1024
-
1025
- if image.shape[1] == 4:
1026
- image_latents = image.to(device=device, dtype=dtype)
1027
- image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
1028
- elif return_image_latents or (latents is None and not is_strength_max):
1029
- image = image.to(device=device, dtype=dtype)
1030
- image_latents = self._encode_vae_image(image=image, generator=generator)
1031
- image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
1032
-
1033
- if latents is None and add_noise:
1034
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
1035
- # if strength is 1. then initialise the latents to noise, else initial to image + noise
1036
- latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
1037
- # if pure noise then scale the initial latents by the Scheduler's init sigma
1038
- latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
1039
- elif add_noise:
1040
- noise = latents.to(device)
1041
- latents = noise * self.scheduler.init_noise_sigma
1042
- else:
1043
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
1044
- latents = image_latents.to(device)
1045
-
1046
- outputs = (latents,)
1047
-
1048
- if return_noise:
1049
- outputs += (noise,)
1050
-
1051
- if return_image_latents:
1052
- outputs += (image_latents,)
1053
-
1054
- return outputs
1055
-
1056
- def prepare_mask_latents(
1057
- self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
1058
- ):
1059
- # resize the mask to latents shape as we concatenate the mask to the latents
1060
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
1061
- # and half precision
1062
- mask = torch.nn.functional.interpolate(
1063
- mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
1064
- )
1065
- mask = mask.to(device=device, dtype=dtype)
1066
-
1067
- # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
1068
- if mask.shape[0] < batch_size:
1069
- if not batch_size % mask.shape[0] == 0:
1070
- raise ValueError(
1071
- "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
1072
- f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
1073
- " of masks that you pass is divisible by the total requested batch size."
1074
- )
1075
- mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
1076
-
1077
- mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
1078
-
1079
- if masked_image is not None and masked_image.shape[1] == 4:
1080
- masked_image_latents = masked_image
1081
- else:
1082
- masked_image_latents = None
1083
-
1084
- if masked_image is not None:
1085
- if masked_image_latents is None:
1086
- masked_image = masked_image.to(device=device, dtype=dtype)
1087
- masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
1088
-
1089
- if masked_image_latents.shape[0] < batch_size:
1090
- if not batch_size % masked_image_latents.shape[0] == 0:
1091
- raise ValueError(
1092
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
1093
- f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
1094
- " Make sure the number of images that you pass is divisible by the total requested batch size."
1095
- )
1096
- masked_image_latents = masked_image_latents.repeat(
1097
- batch_size // masked_image_latents.shape[0], 1, 1, 1
1098
- )
1099
-
1100
- masked_image_latents = (
1101
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
1102
- )
1103
-
1104
- # aligning device to prevent device errors when concating it with the latent model input
1105
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
1106
-
1107
- return mask, masked_image_latents
1108
-
1109
- def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
1110
- dtype = image.dtype
1111
- if self.vae.config.force_upcast:
1112
- image = image.float()
1113
- self.vae.to(dtype=torch.float32)
1114
-
1115
- if isinstance(generator, list):
1116
- image_latents = [
1117
- retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
1118
- for i in range(image.shape[0])
1119
- ]
1120
- image_latents = torch.cat(image_latents, dim=0)
1121
- else:
1122
- image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
1123
-
1124
- if self.vae.config.force_upcast:
1125
- self.vae.to(dtype)
1126
-
1127
- image_latents = image_latents.to(dtype)
1128
- image_latents = self.vae.config.scaling_factor * image_latents
1129
-
1130
- return image_latents
1131
-
1132
- def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
1133
- add_time_ids = list(original_size + crops_coords_top_left + target_size)
1134
-
1135
- passed_add_embed_dim = (
1136
- self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
1137
- )
1138
- expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
1139
-
1140
- if expected_add_embed_dim != passed_add_embed_dim:
1141
- raise ValueError(
1142
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
1143
- )
1144
-
1145
- add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
1146
- return add_time_ids
1147
-
1148
- def upcast_vae(self):
1149
- dtype = self.vae.dtype
1150
- self.vae.to(dtype=torch.float32)
1151
- use_torch_2_0_or_xformers = isinstance(
1152
- self.vae.decoder.mid_block.attentions[0].processor,
1153
- (
1154
- AttnProcessor2_0,
1155
- XFormersAttnProcessor,
1156
- LoRAXFormersAttnProcessor,
1157
- LoRAAttnProcessor2_0,
1158
- FusedAttnProcessor2_0,
1159
- ),
1160
- )
1161
- # if xformers or torch_2_0 is used attention block does not need
1162
- # to be in float32 which can save lots of memory
1163
- if use_torch_2_0_or_xformers:
1164
- self.vae.post_quant_conv.to(dtype)
1165
- self.vae.decoder.conv_in.to(dtype)
1166
- self.vae.decoder.mid_block.to(dtype)
1167
-
1168
- def _enable_shared_attention_processors(
1169
- self,
1170
- share_attention: bool,
1171
- adain_queries: bool,
1172
- adain_keys: bool,
1173
- adain_values: bool,
1174
- full_attention_share: bool,
1175
- shared_score_scale: float,
1176
- shared_score_shift: float,
1177
- only_self_level: float,
1178
- ):
1179
- r"""Helper method to enable usage of Shared Attention Processor."""
1180
- attn_procs = {}
1181
- num_self_layers = len([name for name in self.unet.attn_processors.keys() if "attn1" in name])
1182
-
1183
- only_self_vec = get_switch_vec(num_self_layers, only_self_level)
1184
-
1185
- for i, name in enumerate(self.unet.attn_processors.keys()):
1186
- is_self_attention = "attn1" in name
1187
- if is_self_attention:
1188
- if only_self_vec[i // 2]:
1189
- attn_procs[name] = AttnProcessor2_0()
1190
- else:
1191
- attn_procs[name] = SharedAttentionProcessor(
1192
- share_attention=share_attention,
1193
- adain_queries=adain_queries,
1194
- adain_keys=adain_keys,
1195
- adain_values=adain_values,
1196
- full_attention_share=full_attention_share,
1197
- shared_score_scale=shared_score_scale,
1198
- shared_score_shift=shared_score_shift,
1199
- )
1200
- else:
1201
- attn_procs[name] = AttnProcessor2_0()
1202
-
1203
- self.unet.set_attn_processor(attn_procs)
1204
-
1205
- def _disable_shared_attention_processors(self):
1206
- r"""
1207
- Helper method to disable usage of the Shared Attention Processor. All processors
1208
- are reset to the default Attention Processor for pytorch versions above 2.0.
1209
- """
1210
- attn_procs = {}
1211
-
1212
- for i, name in enumerate(self.unet.attn_processors.keys()):
1213
- attn_procs[name] = AttnProcessor2_0()
1214
-
1215
- self.unet.set_attn_processor(attn_procs)
1216
-
1217
- def _register_shared_norm(self, share_group_norm: bool = True, share_layer_norm: bool = True):
1218
- r"""Helper method to register shared group/layer normalization layers."""
1219
-
1220
- def register_norm_forward(norm_layer: Union[nn.GroupNorm, nn.LayerNorm]) -> Union[nn.GroupNorm, nn.LayerNorm]:
1221
- if not hasattr(norm_layer, "orig_forward"):
1222
- setattr(norm_layer, "orig_forward", norm_layer.forward)
1223
- orig_forward = norm_layer.orig_forward
1224
-
1225
- def forward_(hidden_states: torch.Tensor) -> torch.Tensor:
1226
- n = hidden_states.shape[-2]
1227
- hidden_states = concat_first(hidden_states, dim=-2)
1228
- hidden_states = orig_forward(hidden_states)
1229
- return hidden_states[..., :n, :]
1230
-
1231
- norm_layer.forward = forward_
1232
- return norm_layer
1233
-
1234
- def get_norm_layers(pipeline_, norm_layers_: Dict[str, List[Union[nn.GroupNorm, nn.LayerNorm]]]):
1235
- if isinstance(pipeline_, nn.LayerNorm) and share_layer_norm:
1236
- norm_layers_["layer"].append(pipeline_)
1237
- if isinstance(pipeline_, nn.GroupNorm) and share_group_norm:
1238
- norm_layers_["group"].append(pipeline_)
1239
- else:
1240
- for layer in pipeline_.children():
1241
- get_norm_layers(layer, norm_layers_)
1242
-
1243
- norm_layers = {"group": [], "layer": []}
1244
- get_norm_layers(self.unet, norm_layers)
1245
-
1246
- norm_layers_list = []
1247
- for key in ["group", "layer"]:
1248
- for layer in norm_layers[key]:
1249
- norm_layers_list.append(register_norm_forward(layer))
1250
-
1251
- return norm_layers_list
1252
-
1253
- @property
1254
- def style_aligned_enabled(self):
1255
- r"""Returns whether StyleAligned has been enabled in the pipeline or not."""
1256
- return hasattr(self, "_style_aligned_norm_layers") and self._style_aligned_norm_layers is not None
1257
-
1258
- def enable_style_aligned(
1259
- self,
1260
- share_group_norm: bool = True,
1261
- share_layer_norm: bool = True,
1262
- share_attention: bool = True,
1263
- adain_queries: bool = True,
1264
- adain_keys: bool = True,
1265
- adain_values: bool = False,
1266
- full_attention_share: bool = False,
1267
- shared_score_scale: float = 1.0,
1268
- shared_score_shift: float = 0.0,
1269
- only_self_level: float = 0.0,
1270
- ):
1271
- r"""
1272
- Enables the StyleAligned mechanism as in https://arxiv.org/abs/2312.02133.
1273
-
1274
- Args:
1275
- share_group_norm (`bool`, defaults to `True`):
1276
- Whether or not to use shared group normalization layers.
1277
- share_layer_norm (`bool`, defaults to `True`):
1278
- Whether or not to use shared layer normalization layers.
1279
- share_attention (`bool`, defaults to `True`):
1280
- Whether or not to use attention sharing between batch images.
1281
- adain_queries (`bool`, defaults to `True`):
1282
- Whether or not to apply the AdaIn operation on attention queries.
1283
- adain_keys (`bool`, defaults to `True`):
1284
- Whether or not to apply the AdaIn operation on attention keys.
1285
- adain_values (`bool`, defaults to `False`):
1286
- Whether or not to apply the AdaIn operation on attention values.
1287
- full_attention_share (`bool`, defaults to `False`):
1288
- Whether or not to use full attention sharing between all images in a batch. Can
1289
- lead to content leakage within each batch and some loss in diversity.
1290
- shared_score_scale (`float`, defaults to `1.0`):
1291
- Scale for shared attention.
1292
- """
1293
- self._style_aligned_norm_layers = self._register_shared_norm(share_group_norm, share_layer_norm)
1294
- self._enable_shared_attention_processors(
1295
- share_attention=share_attention,
1296
- adain_queries=adain_queries,
1297
- adain_keys=adain_keys,
1298
- adain_values=adain_values,
1299
- full_attention_share=full_attention_share,
1300
- shared_score_scale=shared_score_scale,
1301
- shared_score_shift=shared_score_shift,
1302
- only_self_level=only_self_level,
1303
- )
1304
-
1305
- def disable_style_aligned(self):
1306
- r"""Disables the StyleAligned mechanism if it had been previously enabled."""
1307
- if self.style_aligned_enabled:
1308
- for layer in self._style_aligned_norm_layers:
1309
- layer.forward = layer.orig_forward
1310
-
1311
- self._style_aligned_norm_layers = None
1312
- self._disable_shared_attention_processors()
1313
-
1314
- # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
1315
- def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
1316
- """
1317
- See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
1318
-
1319
- Args:
1320
- timesteps (`torch.Tensor`):
1321
- generate embedding vectors at these timesteps
1322
- embedding_dim (`int`, *optional*, defaults to 512):
1323
- dimension of the embeddings to generate
1324
- dtype:
1325
- data type of the generated embeddings
1326
-
1327
- Returns:
1328
- `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
1329
- """
1330
- assert len(w.shape) == 1
1331
- w = w * 1000.0
1332
-
1333
- half_dim = embedding_dim // 2
1334
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
1335
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
1336
- emb = w.to(dtype)[:, None] * emb[None, :]
1337
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
1338
- if embedding_dim % 2 == 1: # zero pad
1339
- emb = torch.nn.functional.pad(emb, (0, 1))
1340
- assert emb.shape == (w.shape[0], embedding_dim)
1341
- return emb
1342
-
1343
- @property
1344
- def guidance_scale(self):
1345
- return self._guidance_scale
1346
-
1347
- @property
1348
- def guidance_rescale(self):
1349
- return self._guidance_rescale
1350
-
1351
- @property
1352
- def clip_skip(self):
1353
- return self._clip_skip
1354
-
1355
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
1356
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1357
- # corresponds to doing no classifier free guidance.
1358
- @property
1359
- def do_classifier_free_guidance(self):
1360
- return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
1361
-
1362
- @property
1363
- def cross_attention_kwargs(self):
1364
- return self._cross_attention_kwargs
1365
-
1366
- @property
1367
- def denoising_end(self):
1368
- return self._denoising_end
1369
-
1370
- @property
1371
- def denoising_start(self):
1372
- return self._denoising_start
1373
-
1374
- @property
1375
- def num_timesteps(self):
1376
- return self._num_timesteps
1377
-
1378
- @property
1379
- def interrupt(self):
1380
- return self._interrupt
1381
-
1382
- @torch.no_grad()
1383
- @replace_example_docstring(EXAMPLE_DOC_STRING)
1384
- def __call__(
1385
- self,
1386
- prompt: Union[str, List[str]] = None,
1387
- prompt_2: Optional[Union[str, List[str]]] = None,
1388
- image: Optional[PipelineImageInput] = None,
1389
- mask_image: Optional[PipelineImageInput] = None,
1390
- masked_image_latents: Optional[torch.Tensor] = None,
1391
- strength: float = 0.3,
1392
- height: Optional[int] = None,
1393
- width: Optional[int] = None,
1394
- num_inference_steps: int = 50,
1395
- timesteps: List[int] = None,
1396
- denoising_start: Optional[float] = None,
1397
- denoising_end: Optional[float] = None,
1398
- guidance_scale: float = 5.0,
1399
- negative_prompt: Optional[Union[str, List[str]]] = None,
1400
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
1401
- num_images_per_prompt: Optional[int] = 1,
1402
- eta: float = 0.0,
1403
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1404
- latents: Optional[torch.Tensor] = None,
1405
- prompt_embeds: Optional[torch.Tensor] = None,
1406
- negative_prompt_embeds: Optional[torch.Tensor] = None,
1407
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
1408
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
1409
- ip_adapter_image: Optional[PipelineImageInput] = None,
1410
- output_type: Optional[str] = "pil",
1411
- return_dict: bool = True,
1412
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1413
- guidance_rescale: float = 0.0,
1414
- original_size: Optional[Tuple[int, int]] = None,
1415
- crops_coords_top_left: Tuple[int, int] = (0, 0),
1416
- target_size: Optional[Tuple[int, int]] = None,
1417
- clip_skip: Optional[int] = None,
1418
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
1419
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1420
- **kwargs,
1421
- ):
1422
- r"""
1423
- Function invoked when calling the pipeline for generation.
1424
-
1425
- Args:
1426
- prompt (`str` or `List[str]`, *optional*):
1427
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
1428
- instead.
1429
- prompt_2 (`str` or `List[str]`, *optional*):
1430
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
1431
- used in both text-encoders
1432
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1433
- The height in pixels of the generated image. This is set to 1024 by default for the best results.
1434
- Anything below 512 pixels won't work well for
1435
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1436
- and checkpoints that are not specifically fine-tuned on low resolutions.
1437
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1438
- The width in pixels of the generated image. This is set to 1024 by default for the best results.
1439
- Anything below 512 pixels won't work well for
1440
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1441
- and checkpoints that are not specifically fine-tuned on low resolutions.
1442
- num_inference_steps (`int`, *optional*, defaults to 50):
1443
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1444
- expense of slower inference.
1445
- timesteps (`List[int]`, *optional*):
1446
- Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
1447
- in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
1448
- passed will be used. Must be in descending order.
1449
- denoising_end (`float`, *optional*):
1450
- When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
1451
- completed before it is intentionally prematurely terminated. As a result, the returned sample will
1452
- still retain a substantial amount of noise as determined by the discrete timesteps selected by the
1453
- scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
1454
- "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1455
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
1456
- guidance_scale (`float`, *optional*, defaults to 5.0):
1457
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1458
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
1459
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1460
- 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
1461
- usually at the expense of lower image quality.
1462
- negative_prompt (`str` or `List[str]`, *optional*):
1463
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
1464
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1465
- less than `1`).
1466
- negative_prompt_2 (`str` or `List[str]`, *optional*):
1467
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
1468
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
1469
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1470
- The number of images to generate per prompt.
1471
- eta (`float`, *optional*, defaults to 0.0):
1472
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1473
- [`schedulers.DDIMScheduler`], will be ignored for others.
1474
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1475
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1476
- to make generation deterministic.
1477
- latents (`torch.Tensor`, *optional*):
1478
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1479
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1480
- tensor will be generated by sampling using the supplied random `generator`.
1481
- prompt_embeds (`torch.Tensor`, *optional*):
1482
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1483
- provided, text embeddings will be generated from `prompt` input argument.
1484
- negative_prompt_embeds (`torch.Tensor`, *optional*):
1485
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1486
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1487
- argument.
1488
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
1489
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1490
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
1491
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
1492
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1493
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1494
- input argument.
1495
- ip_adapter_image (`PipelineImageInput`, *optional*):
1496
- Optional image input to work with IP Adapters.
1497
- output_type (`str`, *optional*, defaults to `"pil"`):
1498
- The output format of the generated image. Choose between
1499
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1500
- return_dict (`bool`, *optional*, defaults to `True`):
1501
- Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
1502
- of a plain tuple.
1503
- cross_attention_kwargs (`dict`, *optional*):
1504
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1505
- `self.processor` in
1506
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1507
- guidance_rescale (`float`, *optional*, defaults to 0.0):
1508
- Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
1509
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
1510
- [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
1511
- Guidance rescale factor should fix overexposure when using zero terminal SNR.
1512
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1513
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1514
- `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1515
- explained in section 2.2 of
1516
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1517
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1518
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1519
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1520
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1521
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1522
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1523
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
1524
- not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1525
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1526
- negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1527
- To negatively condition the generation process based on a specific image resolution. Part of SDXL's
1528
- micro-conditioning as explained in section 2.2 of
1529
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1530
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1531
- negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1532
- To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
1533
- micro-conditioning as explained in section 2.2 of
1534
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1535
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1536
- negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1537
- To negatively condition the generation process based on a target image resolution. It should be the same
1538
- as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
1539
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1540
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1541
- callback_on_step_end (`Callable`, *optional*):
1542
- A function called at the end of each denoising step during inference. The function is called
1543
- with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1544
- callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1545
- `callback_on_step_end_tensor_inputs`.
1546
- callback_on_step_end_tensor_inputs (`List`, *optional*):
1547
- The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1548
- will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1549
- `._callback_tensor_inputs` attribute of your pipeline class.
1550
-
1551
- Examples:
1552
-
1553
- Returns:
1554
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
1555
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
1556
- `tuple`. When returning a tuple, the first element is a list with the generated images.
1557
- """
1558
-
1559
- callback = kwargs.pop("callback", None)
1560
- callback_steps = kwargs.pop("callback_steps", None)
1561
-
1562
- if callback is not None:
1563
- deprecate(
1564
- "callback",
1565
- "1.0.0",
1566
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1567
- )
1568
- if callback_steps is not None:
1569
- deprecate(
1570
- "callback_steps",
1571
- "1.0.0",
1572
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1573
- )
1574
-
1575
- # 0. Default height and width to unet
1576
- height = height or self.default_sample_size * self.vae_scale_factor
1577
- width = width or self.default_sample_size * self.vae_scale_factor
1578
-
1579
- original_size = original_size or (height, width)
1580
- target_size = target_size or (height, width)
1581
-
1582
- # 1. Check inputs. Raise error if not correct
1583
- self.check_inputs(
1584
- prompt=prompt,
1585
- prompt_2=prompt_2,
1586
- height=height,
1587
- width=width,
1588
- callback_steps=callback_steps,
1589
- negative_prompt=negative_prompt,
1590
- negative_prompt_2=negative_prompt_2,
1591
- prompt_embeds=prompt_embeds,
1592
- negative_prompt_embeds=negative_prompt_embeds,
1593
- pooled_prompt_embeds=pooled_prompt_embeds,
1594
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1595
- callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
1596
- )
1597
-
1598
- self._guidance_scale = guidance_scale
1599
- self._guidance_rescale = guidance_rescale
1600
- self._clip_skip = clip_skip
1601
- self._cross_attention_kwargs = cross_attention_kwargs
1602
- self._denoising_end = denoising_end
1603
- self._denoising_start = denoising_start
1604
- self._interrupt = False
1605
-
1606
- # 2. Define call parameters
1607
- if prompt is not None and isinstance(prompt, str):
1608
- batch_size = 1
1609
- elif prompt is not None and isinstance(prompt, list):
1610
- batch_size = len(prompt)
1611
- else:
1612
- batch_size = prompt_embeds.shape[0]
1613
-
1614
- device = self._execution_device
1615
-
1616
- # 3. Encode input prompt
1617
- lora_scale = (
1618
- self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1619
- )
1620
-
1621
- (
1622
- prompt_embeds,
1623
- negative_prompt_embeds,
1624
- pooled_prompt_embeds,
1625
- negative_pooled_prompt_embeds,
1626
- ) = self.encode_prompt(
1627
- prompt=prompt,
1628
- prompt_2=prompt_2,
1629
- device=device,
1630
- num_images_per_prompt=num_images_per_prompt,
1631
- do_classifier_free_guidance=self.do_classifier_free_guidance,
1632
- negative_prompt=negative_prompt,
1633
- negative_prompt_2=negative_prompt_2,
1634
- prompt_embeds=prompt_embeds,
1635
- negative_prompt_embeds=negative_prompt_embeds,
1636
- pooled_prompt_embeds=pooled_prompt_embeds,
1637
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1638
- lora_scale=lora_scale,
1639
- clip_skip=self.clip_skip,
1640
- )
1641
-
1642
- # 4. Preprocess image and mask_image
1643
- if image is not None:
1644
- image = self.image_processor.preprocess(image, height=height, width=width)
1645
- image = image.to(device=self.device, dtype=prompt_embeds.dtype)
1646
-
1647
- if mask_image is not None:
1648
- mask = self.mask_processor.preprocess(mask_image, height=height, width=width)
1649
- mask = mask.to(device=self.device, dtype=prompt_embeds.dtype)
1650
-
1651
- if masked_image_latents is not None:
1652
- masked_image = masked_image_latents
1653
- elif image.shape[1] == 4:
1654
- # if image is in latent space, we can't mask it
1655
- masked_image = None
1656
- else:
1657
- masked_image = image * (mask < 0.5)
1658
- else:
1659
- mask = None
1660
-
1661
- # 4. Prepare timesteps
1662
- def denoising_value_valid(dnv):
1663
- return isinstance(dnv, float) and 0 < dnv < 1
1664
-
1665
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
1666
-
1667
- if image is not None:
1668
- timesteps, num_inference_steps = self.get_timesteps(
1669
- num_inference_steps,
1670
- strength,
1671
- device,
1672
- denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
1673
- )
1674
-
1675
- # check that number of inference steps is not < 1 - as this doesn't make sense
1676
- if num_inference_steps < 1:
1677
- raise ValueError(
1678
- f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline "
1679
- f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
1680
- )
1681
-
1682
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1683
- is_strength_max = strength == 1.0
1684
- add_noise = True if self.denoising_start is None else False
1685
-
1686
- # 5. Prepare latent variables
1687
- num_channels_latents = self.unet.config.in_channels
1688
- num_channels_unet = self.unet.config.in_channels
1689
- return_image_latents = num_channels_unet == 4
1690
-
1691
- latents = self.prepare_latents(
1692
- image=image,
1693
- mask=mask,
1694
- width=width,
1695
- height=height,
1696
- num_channels_latents=num_channels_latents,
1697
- timestep=latent_timestep,
1698
- batch_size=batch_size * num_images_per_prompt,
1699
- num_images_per_prompt=num_images_per_prompt,
1700
- dtype=prompt_embeds.dtype,
1701
- device=device,
1702
- generator=generator,
1703
- add_noise=add_noise,
1704
- latents=latents,
1705
- is_strength_max=is_strength_max,
1706
- return_noise=True,
1707
- return_image_latents=return_image_latents,
1708
- )
1709
-
1710
- if mask is not None:
1711
- if return_image_latents:
1712
- latents, noise, image_latents = latents
1713
- else:
1714
- latents, noise = latents
1715
-
1716
- mask, masked_image_latents = self.prepare_mask_latents(
1717
- mask=mask,
1718
- masked_image=masked_image,
1719
- batch_size=batch_size * num_images_per_prompt,
1720
- height=height,
1721
- width=width,
1722
- dtype=prompt_embeds.dtype,
1723
- device=device,
1724
- generator=generator,
1725
- do_classifier_free_guidance=self.do_classifier_free_guidance,
1726
- )
1727
-
1728
- # Check that sizes of mask, masked image and latents match
1729
- if num_channels_unet == 9:
1730
- # default case for runwayml/stable-diffusion-inpainting
1731
- num_channels_mask = mask.shape[1]
1732
- num_channels_masked_image = masked_image_latents.shape[1]
1733
- if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet:
1734
- raise ValueError(
1735
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
1736
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
1737
- f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1738
- f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
1739
- " `pipeline.unet` or your `mask_image` or `image` input."
1740
- )
1741
- elif num_channels_unet != 4:
1742
- raise ValueError(
1743
- f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
1744
- )
1745
-
1746
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1747
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1748
-
1749
- height, width = latents.shape[-2:]
1750
- height = height * self.vae_scale_factor
1751
- width = width * self.vae_scale_factor
1752
-
1753
- original_size = original_size or (height, width)
1754
- target_size = target_size or (height, width)
1755
-
1756
- # 7. Prepare added time ids & embeddings
1757
- add_text_embeds = pooled_prompt_embeds
1758
- add_time_ids = self._get_add_time_ids(
1759
- original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
1760
- )
1761
-
1762
- if self.do_classifier_free_guidance:
1763
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1764
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1765
- add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
1766
-
1767
- prompt_embeds = prompt_embeds.to(device)
1768
- add_text_embeds = add_text_embeds.to(device)
1769
- add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1770
-
1771
- if ip_adapter_image is not None:
1772
- output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
1773
- image_embeds, negative_image_embeds = self.encode_image(
1774
- ip_adapter_image, device, num_images_per_prompt, output_hidden_state
1775
- )
1776
- if self.do_classifier_free_guidance:
1777
- image_embeds = torch.cat([negative_image_embeds, image_embeds])
1778
- image_embeds = image_embeds.to(device)
1779
-
1780
- # 8. Denoising loop
1781
- num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1782
-
1783
- # 8.1 Apply denoising_end
1784
- if (
1785
- self.denoising_end is not None
1786
- and isinstance(self.denoising_end, float)
1787
- and self.denoising_end > 0
1788
- and self.denoising_end < 1
1789
- ):
1790
- discrete_timestep_cutoff = int(
1791
- round(
1792
- self.scheduler.config.num_train_timesteps
1793
- - (self.denoising_end * self.scheduler.config.num_train_timesteps)
1794
- )
1795
- )
1796
- num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1797
- timesteps = timesteps[:num_inference_steps]
1798
-
1799
- # 9. Optionally get Guidance Scale Embedding
1800
- timestep_cond = None
1801
- if self.unet.config.time_cond_proj_dim is not None:
1802
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1803
- timestep_cond = self.get_guidance_scale_embedding(
1804
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1805
- ).to(device=device, dtype=latents.dtype)
1806
-
1807
- self._num_timesteps = len(timesteps)
1808
-
1809
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1810
- for i, t in enumerate(timesteps):
1811
- if self.interrupt:
1812
- continue
1813
-
1814
- # expand the latents if we are doing classifier free guidance
1815
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1816
-
1817
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1818
-
1819
- # predict the noise residual
1820
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1821
- if ip_adapter_image is not None:
1822
- added_cond_kwargs["image_embeds"] = image_embeds
1823
-
1824
- noise_pred = self.unet(
1825
- latent_model_input,
1826
- t,
1827
- encoder_hidden_states=prompt_embeds,
1828
- timestep_cond=timestep_cond,
1829
- cross_attention_kwargs=self.cross_attention_kwargs,
1830
- added_cond_kwargs=added_cond_kwargs,
1831
- return_dict=False,
1832
- )[0]
1833
-
1834
- # perform guidance
1835
- if self.do_classifier_free_guidance:
1836
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1837
- noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1838
-
1839
- if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
1840
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1841
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
1842
-
1843
- # compute the previous noisy sample x_t -> x_t-1
1844
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1845
-
1846
- if mask is not None and num_channels_unet == 4:
1847
- init_latents_proper = image_latents
1848
-
1849
- if self.do_classifier_free_guidance:
1850
- init_mask, _ = mask.chunk(2)
1851
- else:
1852
- init_mask = mask
1853
-
1854
- if i < len(timesteps) - 1:
1855
- noise_timestep = timesteps[i + 1]
1856
- init_latents_proper = self.scheduler.add_noise(
1857
- init_latents_proper, noise, torch.tensor([noise_timestep])
1858
- )
1859
-
1860
- latents = (1 - init_mask) * init_latents_proper + init_mask * latents
1861
-
1862
- if callback_on_step_end is not None:
1863
- callback_kwargs = {}
1864
- for k in callback_on_step_end_tensor_inputs:
1865
- callback_kwargs[k] = locals()[k]
1866
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1867
-
1868
- latents = callback_outputs.pop("latents", latents)
1869
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1870
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1871
- add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
1872
- negative_pooled_prompt_embeds = callback_outputs.pop(
1873
- "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
1874
- )
1875
- add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
1876
-
1877
- # call the callback, if provided
1878
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1879
- progress_bar.update()
1880
- if callback is not None and i % callback_steps == 0:
1881
- step_idx = i // getattr(self.scheduler, "order", 1)
1882
- callback(step_idx, t, latents)
1883
-
1884
- if XLA_AVAILABLE:
1885
- xm.mark_step()
1886
-
1887
- if not output_type == "latent":
1888
- # make sure the VAE is in float32 mode, as it overflows in float16
1889
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1890
-
1891
- if needs_upcasting:
1892
- self.upcast_vae()
1893
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1894
-
1895
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1896
-
1897
- # cast back to fp16 if needed
1898
- if needs_upcasting:
1899
- self.vae.to(dtype=torch.float16)
1900
- else:
1901
- image = latents
1902
-
1903
- if not output_type == "latent":
1904
- # apply watermark if available
1905
- if self.watermark is not None:
1906
- image = self.watermark.apply_watermark(image)
1907
-
1908
- image = self.image_processor.postprocess(image, output_type=output_type)
1909
-
1910
- # Offload all models
1911
- self.maybe_free_model_hooks()
1912
-
1913
- if not return_dict:
1914
- return (image,)
1915
-
1916
- return StableDiffusionXLPipelineOutput(images=image)
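
The per-step inpainting blend in the loop above (`latents = (1 - init_mask) * init_latents_proper + init_mask * latents`) is easiest to see in isolation. A toy sketch with dummy tensors, using a fixed alpha in place of `scheduler.add_noise` (an assumption made only for illustration):

```py
# Toy illustration of the per-step inpainting blend used in the denoising loop above:
# wherever the (0/1) mask is 0 we keep a freshly re-noised copy of the original image
# latents; only the masked region keeps the denoised prediction.
import torch

latents = torch.randn(1, 4, 64, 64)        # current denoised latents
image_latents = torch.randn(1, 4, 64, 64)  # latents of the original image
noise = torch.randn_like(image_latents)
mask = torch.zeros(1, 1, 64, 64)
mask[..., 16:48, 16:48] = 1.0              # 1 = region to inpaint, 0 = keep original

alpha = 0.9                                # stand-in for the next-timestep noise level
renoised = alpha**0.5 * image_latents + (1 - alpha) ** 0.5 * noise
latents = (1 - mask) * renoised + mask * latents  # original content preserved where mask == 0
```
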
 
 
pipeline_stable_diffusion_boxdiff.py DELETED
@@ -1,1700 +0,0 @@
1
- # Copyright 2024 Jingyang Zhang and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import abc
16
- import inspect
17
- import math
18
- import numbers
19
- from typing import Any, Callable, Dict, List, Optional, Union
20
-
21
- import numpy as np
22
- import torch
23
- import torch.nn as nn
24
- import torch.nn.functional as F
25
- from packaging import version
26
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
27
-
28
- from diffusers.configuration_utils import FrozenDict
29
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
30
- from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
31
- from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
32
- from diffusers.models.attention_processor import Attention, FusedAttnProcessor2_0
33
- from diffusers.models.lora import adjust_lora_scale_text_encoder
34
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline
35
- from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
36
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
37
- from diffusers.schedulers import KarrasDiffusionSchedulers
38
- from diffusers.utils import (
39
- USE_PEFT_BACKEND,
40
- deprecate,
41
- logging,
42
- replace_example_docstring,
43
- scale_lora_layers,
44
- unscale_lora_layers,
45
- )
46
- from diffusers.utils.torch_utils import randn_tensor
47
-
48
-
49
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
50
-
51
- EXAMPLE_DOC_STRING = """
52
- Examples:
53
- ```py
54
- >>> import torch
55
- >>> from diffusers import StableDiffusionPipeline
56
-
57
- >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
58
- >>> pipe = pipe.to("cuda")
59
-
60
- >>> prompt = "a photo of an astronaut riding a horse on mars"
61
- >>> image = pipe(prompt).images[0]
62
- ```
63
- """
64
-
65
-
66
- class GaussianSmoothing(nn.Module):
67
- """
68
- Copied from official repo: https://github.com/showlab/BoxDiff/blob/master/utils/gaussian_smoothing.py
69
- Apply gaussian smoothing on a
70
- 1d, 2d or 3d tensor. Filtering is performed separately for each channel
71
- in the input using a depthwise convolution.
72
- Arguments:
73
- channels (int, sequence): Number of channels of the input tensors. Output will
74
- have this number of channels as well.
75
- kernel_size (int, sequence): Size of the gaussian kernel.
76
- sigma (float, sequence): Standard deviation of the gaussian kernel.
77
- dim (int, optional): The number of dimensions of the data.
78
- Default value is 2 (spatial).
79
- """
80
-
81
- def __init__(self, channels, kernel_size, sigma, dim=2):
82
- super(GaussianSmoothing, self).__init__()
83
- if isinstance(kernel_size, numbers.Number):
84
- kernel_size = [kernel_size] * dim
85
- if isinstance(sigma, numbers.Number):
86
- sigma = [sigma] * dim
87
-
88
- # The gaussian kernel is the product of the
89
- # gaussian function of each dimension.
90
- kernel = 1
91
- meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
92
- for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
93
- mean = (size - 1) / 2
94
- kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2))
95
-
96
- # Make sure sum of values in gaussian kernel equals 1.
97
- kernel = kernel / torch.sum(kernel)
98
-
99
- # Reshape to depthwise convolutional weight
100
- kernel = kernel.view(1, 1, *kernel.size())
101
- kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
102
-
103
- self.register_buffer("weight", kernel)
104
- self.groups = channels
105
-
106
- if dim == 1:
107
- self.conv = F.conv1d
108
- elif dim == 2:
109
- self.conv = F.conv2d
110
- elif dim == 3:
111
- self.conv = F.conv3d
112
- else:
113
- raise RuntimeError("Only 1, 2 and 3 dimensions are supported. Received {}.".format(dim))
114
-
115
- def forward(self, input):
116
- """
117
- Apply gaussian filter to input.
118
- Arguments:
119
- input (torch.Tensor): Input to apply gaussian filter on.
120
- Returns:
121
- filtered (torch.Tensor): Filtered output.
122
- """
123
- return self.conv(input, weight=self.weight.to(input.dtype), groups=self.groups)
124
-
125
-
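
A hedged usage sketch of the module above; the kernel size, sigma, and reflection padding are illustrative choices (BoxDiff likewise pads attention maps before smoothing them):

```py
# Blur a single 16x16 cross-attention map with the GaussianSmoothing module above.
# Padding is added so the output keeps its spatial size after the 3x3 kernel.
import torch
import torch.nn.functional as F

smoother = GaussianSmoothing(channels=1, kernel_size=3, sigma=0.5, dim=2)
attn_map = torch.rand(1, 1, 16, 16)                     # (batch, channels, H, W)
padded = F.pad(attn_map, (1, 1, 1, 1), mode="reflect")  # keep 16x16 after filtering
smoothed = smoother(padded)
print(smoothed.shape)  # torch.Size([1, 1, 16, 16])
```
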
126
- class AttendExciteCrossAttnProcessor:
127
- def __init__(self, attnstore, place_in_unet):
128
- super().__init__()
129
- self.attnstore = attnstore
130
- self.place_in_unet = place_in_unet
131
-
132
- def __call__(
133
- self,
134
- attn: Attention,
135
- hidden_states: torch.FloatTensor,
136
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
137
- attention_mask: Optional[torch.FloatTensor] = None,
138
- ) -> torch.Tensor:
139
- batch_size, sequence_length, _ = hidden_states.shape
140
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size=1)
141
- query = attn.to_q(hidden_states)
142
-
143
- is_cross = encoder_hidden_states is not None
144
- encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
145
- key = attn.to_k(encoder_hidden_states)
146
- value = attn.to_v(encoder_hidden_states)
147
-
148
- query = attn.head_to_batch_dim(query)
149
- key = attn.head_to_batch_dim(key)
150
- value = attn.head_to_batch_dim(value)
151
-
152
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
153
- self.attnstore(attention_probs, is_cross, self.place_in_unet)
154
-
155
- hidden_states = torch.bmm(attention_probs, value)
156
- hidden_states = attn.batch_to_head_dim(hidden_states)
157
-
158
- # linear proj
159
- hidden_states = attn.to_out[0](hidden_states)
160
- # dropout
161
- hidden_states = attn.to_out[1](hidden_states)
162
-
163
- return hidden_states
164
-
165
-
166
- class AttentionControl(abc.ABC):
167
- def step_callback(self, x_t):
168
- return x_t
169
-
170
- def between_steps(self):
171
- return
172
-
173
- # @property
174
- # def num_uncond_att_layers(self):
175
- # return 0
176
-
177
- @abc.abstractmethod
178
- def forward(self, attn, is_cross: bool, place_in_unet: str):
179
- raise NotImplementedError
180
-
181
- def __call__(self, attn, is_cross: bool, place_in_unet: str):
182
- if self.cur_att_layer >= self.num_uncond_att_layers:
183
- self.forward(attn, is_cross, place_in_unet)
184
- self.cur_att_layer += 1
185
- if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
186
- self.cur_att_layer = 0
187
- self.cur_step += 1
188
- self.between_steps()
189
-
190
- def reset(self):
191
- self.cur_step = 0
192
- self.cur_att_layer = 0
193
-
194
- def __init__(self):
195
- self.cur_step = 0
196
- self.num_att_layers = -1
197
- self.cur_att_layer = 0
198
-
199
-
200
- class AttentionStore(AttentionControl):
201
- @staticmethod
202
- def get_empty_store():
203
- return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []}
204
-
205
- def forward(self, attn, is_cross: bool, place_in_unet: str):
206
- key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
207
- if attn.shape[1] <= 32**2: # avoid memory overhead
208
- self.step_store[key].append(attn)
209
- return attn
210
-
211
- def between_steps(self):
212
- self.attention_store = self.step_store
213
- if self.save_global_store:
214
- with torch.no_grad():
215
- if len(self.global_store) == 0:
216
- self.global_store = self.step_store
217
- else:
218
- for key in self.global_store:
219
- for i in range(len(self.global_store[key])):
220
- self.global_store[key][i] += self.step_store[key][i].detach()
221
- self.step_store = self.get_empty_store()
222
- self.step_store = self.get_empty_store()
223
-
224
- def get_average_attention(self):
225
- average_attention = self.attention_store
226
- return average_attention
227
-
228
- def get_average_global_attention(self):
229
- average_attention = {
230
- key: [item / self.cur_step for item in self.global_store[key]] for key in self.attention_store
231
- }
232
- return average_attention
233
-
234
- def reset(self):
235
- super(AttentionStore, self).reset()
236
- self.step_store = self.get_empty_store()
237
- self.attention_store = {}
238
- self.global_store = {}
239
-
240
- def __init__(self, save_global_store=False):
241
- """
242
- Initialize an empty AttentionStore
243
- :param save_global_store: if True, additionally accumulate attention maps across all diffusion steps in `global_store`
244
- """
245
- super(AttentionStore, self).__init__()
246
- self.save_global_store = save_global_store
247
- self.step_store = self.get_empty_store()
248
- self.attention_store = {}
249
- self.global_store = {}
250
- self.curr_step_index = 0
251
- self.num_uncond_att_layers = 0
252
-
253
-
254
- def aggregate_attention(
255
- attention_store: AttentionStore, res: int, from_where: List[str], is_cross: bool, select: int
256
- ) -> torch.Tensor:
257
- """Aggregates the attention across the different layers and heads at the specified resolution."""
258
- out = []
259
- attention_maps = attention_store.get_average_attention()
260
-
261
- # for k, v in attention_maps.items():
262
- # for vv in v:
263
- # print(vv.shape)
264
- # exit()
265
-
266
- num_pixels = res**2
267
- for location in from_where:
268
- for item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]:
269
- if item.shape[1] == num_pixels:
270
- cross_maps = item.reshape(1, -1, res, res, item.shape[-1])[select]
271
- out.append(cross_maps)
272
- out = torch.cat(out, dim=0)
273
- out = out.sum(0) / out.shape[0]
274
- return out
275
-
276
-
277
- def register_attention_control(model, controller):
278
- attn_procs = {}
279
- cross_att_count = 0
280
- for name in model.unet.attn_processors.keys():
281
- # cross_attention_dim = None if name.endswith("attn1.processor") else model.unet.config.cross_attention_dim
282
- if name.startswith("mid_block"):
283
- # hidden_size = model.unet.config.block_out_channels[-1]
284
- place_in_unet = "mid"
285
- elif name.startswith("up_blocks"):
286
- # block_id = int(name[len("up_blocks.")])
287
- # hidden_size = list(reversed(model.unet.config.block_out_channels))[block_id]
288
- place_in_unet = "up"
289
- elif name.startswith("down_blocks"):
290
- # block_id = int(name[len("down_blocks.")])
291
- # hidden_size = model.unet.config.block_out_channels[block_id]
292
- place_in_unet = "down"
293
- else:
294
- continue
295
-
296
- cross_att_count += 1
297
- attn_procs[name] = AttendExciteCrossAttnProcessor(attnstore=controller, place_in_unet=place_in_unet)
298
- model.unet.set_attn_processor(attn_procs)
299
- controller.num_att_layers = cross_att_count
300
-
301
-
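
Putting the pieces above together, a hedged sketch of recording and aggregating cross-attention maps during an ordinary generation pass might look like the following (the checkpoint id is an assumption and the printed shape is approximate):

```py
# Hedged sketch: record cross-attention maps with the AttentionStore above during a
# standard text-to-image pass, then aggregate them at 16x16 resolution.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

store = AttentionStore()
register_attention_control(pipe, store)  # swaps in AttendExciteCrossAttnProcessor everywhere

_ = pipe("a photo of a corgi", num_inference_steps=20)
maps = aggregate_attention(store, res=16, from_where=["up", "down"], is_cross=True, select=0)
print(maps.shape)  # roughly (res, res, text_sequence_length), e.g. (16, 16, 77)
```
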
302
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
303
- """
304
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
305
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
306
- """
307
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
308
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
309
- # rescale the results from guidance (fixes overexposure)
310
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
311
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
312
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
313
- return noise_cfg
314
-
315
-
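
Assuming the helper above is in scope, a small numerical check shows what it does: with `guidance_rescale=1.0`, the combined prediction is rescaled to the per-sample standard deviation of the text-conditioned branch.

```py
# Sanity check for rescale_noise_cfg above on dummy tensors.
import torch

noise_pred_text = torch.randn(2, 4, 8, 8)
noise_pred_uncond = torch.randn(2, 4, 8, 8)
noise_cfg = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)

rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0)
print(noise_pred_text.std(dim=(1, 2, 3)))
print(rescaled.std(dim=(1, 2, 3)))  # matches the line above up to floating point error
```
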
316
- def retrieve_timesteps(
317
- scheduler,
318
- num_inference_steps: Optional[int] = None,
319
- device: Optional[Union[str, torch.device]] = None,
320
- timesteps: Optional[List[int]] = None,
321
- **kwargs,
322
- ):
323
- """
324
- Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
325
- custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
326
-
327
- Args:
328
- scheduler (`SchedulerMixin`):
329
- The scheduler to get timesteps from.
330
- num_inference_steps (`int`):
331
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
332
- `timesteps` must be `None`.
333
- device (`str` or `torch.device`, *optional*):
334
- The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
335
- timesteps (`List[int]`, *optional*):
336
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
337
- timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
338
- must be `None`.
339
-
340
- Returns:
341
- `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
342
- second element is the number of inference steps.
343
- """
344
- if timesteps is not None:
345
- accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
346
- if not accepts_timesteps:
347
- raise ValueError(
348
- f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
349
- f" timestep schedules. Please check whether you are using the correct scheduler."
350
- )
351
- scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
352
- timesteps = scheduler.timesteps
353
- num_inference_steps = len(timesteps)
354
- else:
355
- scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
356
- timesteps = scheduler.timesteps
357
- return timesteps, num_inference_steps
358
-
359
-
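
Assuming the helper above is in scope, a sketch of its default branch with `DDIMScheduler` as a stand-in (the custom-`timesteps` branch additionally requires a scheduler whose `set_timesteps` accepts a `timesteps` argument):

```py
# Default branch of retrieve_timesteps above: the scheduler picks the spacing.
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)
timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=10)
print(num_inference_steps)  # 10
print(timesteps)            # descending timesteps chosen by the scheduler, e.g. 900, 800, ..., 0
```
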
360
- class StableDiffusionBoxDiffPipeline(
361
- DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
362
- ):
363
- r"""
364
- Pipeline for text-to-image generation using Stable Diffusion with BoxDiff.
365
-
366
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
367
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
368
-
369
- The pipeline also inherits the following loading methods:
370
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
371
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
372
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
373
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
374
- - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
375
-
376
- Args:
377
- vae ([`AutoencoderKL`]):
378
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
379
- text_encoder ([`~transformers.CLIPTextModel`]):
380
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
381
- tokenizer ([`~transformers.CLIPTokenizer`]):
382
- A `CLIPTokenizer` to tokenize text.
383
- unet ([`UNet2DConditionModel`]):
384
- A `UNet2DConditionModel` to denoise the encoded image latents.
385
- scheduler ([`SchedulerMixin`]):
386
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
387
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
388
- safety_checker ([`StableDiffusionSafetyChecker`]):
389
- Classification module that estimates whether generated images could be considered offensive or harmful.
390
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
391
- about a model's potential harms.
392
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
393
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
394
- """
395
-
396
- model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
397
- _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
398
- _exclude_from_cpu_offload = ["safety_checker"]
399
- _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
400
-
401
- def __init__(
402
- self,
403
- vae: AutoencoderKL,
404
- text_encoder: CLIPTextModel,
405
- tokenizer: CLIPTokenizer,
406
- unet: UNet2DConditionModel,
407
- scheduler: KarrasDiffusionSchedulers,
408
- safety_checker: StableDiffusionSafetyChecker,
409
- feature_extractor: CLIPImageProcessor,
410
- image_encoder: CLIPVisionModelWithProjection = None,
411
- requires_safety_checker: bool = True,
412
- ):
413
- super().__init__()
414
-
415
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
416
- deprecation_message = (
417
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
418
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
419
- "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
420
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
421
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
422
- " file"
423
- )
424
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
425
- new_config = dict(scheduler.config)
426
- new_config["steps_offset"] = 1
427
- scheduler._internal_dict = FrozenDict(new_config)
428
-
429
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
430
- deprecation_message = (
431
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
432
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
433
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
434
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
435
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
436
- )
437
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
438
- new_config = dict(scheduler.config)
439
- new_config["clip_sample"] = False
440
- scheduler._internal_dict = FrozenDict(new_config)
441
-
442
- if safety_checker is None and requires_safety_checker:
443
- logger.warning(
444
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
445
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
446
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
447
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
448
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
449
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
450
- )
451
-
452
- if safety_checker is not None and feature_extractor is None:
453
- raise ValueError(
454
- f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
455
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
456
- )
457
-
458
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
459
- version.parse(unet.config._diffusers_version).base_version
460
- ) < version.parse("0.9.0.dev0")
461
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
462
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
463
- deprecation_message = (
464
- "The configuration file of the unet has set the default `sample_size` to smaller than"
465
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
466
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
467
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
468
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
469
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
470
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
471
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
472
- " the `unet/config.json` file"
473
- )
474
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
475
- new_config = dict(unet.config)
476
- new_config["sample_size"] = 64
477
- unet._internal_dict = FrozenDict(new_config)
478
-
479
- self.register_modules(
480
- vae=vae,
481
- text_encoder=text_encoder,
482
- tokenizer=tokenizer,
483
- unet=unet,
484
- scheduler=scheduler,
485
- safety_checker=safety_checker,
486
- feature_extractor=feature_extractor,
487
- image_encoder=image_encoder,
488
- )
489
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
490
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
491
- self.register_to_config(requires_safety_checker=requires_safety_checker)
492
-
493
- def enable_vae_slicing(self):
494
- r"""
495
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
496
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
497
- """
498
- self.vae.enable_slicing()
499
-
500
- def disable_vae_slicing(self):
501
- r"""
502
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
503
- computing decoding in one step.
504
- """
505
- self.vae.disable_slicing()
506
-
507
- def enable_vae_tiling(self):
508
- r"""
509
- Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
510
- compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
511
- processing larger images.
512
- """
513
- self.vae.enable_tiling()
514
-
515
- def disable_vae_tiling(self):
516
- r"""
517
- Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
518
- computing decoding in one step.
519
- """
520
- self.vae.disable_tiling()
521
-
522
- def _encode_prompt(
523
- self,
524
- prompt,
525
- device,
526
- num_images_per_prompt,
527
- do_classifier_free_guidance,
528
- negative_prompt=None,
529
- prompt_embeds: Optional[torch.FloatTensor] = None,
530
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
531
- lora_scale: Optional[float] = None,
532
- **kwargs,
533
- ):
534
- deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
535
- deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
536
-
537
- prompt_embeds_tuple = self.encode_prompt(
538
- prompt=prompt,
539
- device=device,
540
- num_images_per_prompt=num_images_per_prompt,
541
- do_classifier_free_guidance=do_classifier_free_guidance,
542
- negative_prompt=negative_prompt,
543
- prompt_embeds=prompt_embeds,
544
- negative_prompt_embeds=negative_prompt_embeds,
545
- lora_scale=lora_scale,
546
- **kwargs,
547
- )
548
-
549
- # concatenate for backwards comp
550
- prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
551
-
552
- return prompt_embeds
553
-
554
- def encode_prompt(
555
- self,
556
- prompt,
557
- device,
558
- num_images_per_prompt,
559
- do_classifier_free_guidance,
560
- negative_prompt=None,
561
- prompt_embeds: Optional[torch.FloatTensor] = None,
562
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
563
- lora_scale: Optional[float] = None,
564
- clip_skip: Optional[int] = None,
565
- ):
566
- r"""
567
- Encodes the prompt into text encoder hidden states.
568
-
569
- Args:
570
- prompt (`str` or `List[str]`, *optional*):
571
- prompt to be encoded
572
- device: (`torch.device`):
573
- torch device
574
- num_images_per_prompt (`int`):
575
- number of images that should be generated per prompt
576
- do_classifier_free_guidance (`bool`):
577
- whether to use classifier free guidance or not
578
- negative_prompt (`str` or `List[str]`, *optional*):
579
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
580
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
581
- less than `1`).
582
- prompt_embeds (`torch.FloatTensor`, *optional*):
583
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
584
- provided, text embeddings will be generated from `prompt` input argument.
585
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
586
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
587
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
588
- argument.
589
- lora_scale (`float`, *optional*):
590
- A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
591
- clip_skip (`int`, *optional*):
592
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
593
- the output of the pre-final layer will be used for computing the prompt embeddings.
594
- """
595
- # set lora scale so that monkey patched LoRA
596
- # function of text encoder can correctly access it
597
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
598
- self._lora_scale = lora_scale
599
-
600
- # dynamically adjust the LoRA scale
601
- if not USE_PEFT_BACKEND:
602
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
603
- else:
604
- scale_lora_layers(self.text_encoder, lora_scale)
605
-
606
- if prompt is not None and isinstance(prompt, str):
607
- batch_size = 1
608
- elif prompt is not None and isinstance(prompt, list):
609
- batch_size = len(prompt)
610
- else:
611
- batch_size = prompt_embeds.shape[0]
612
-
613
- if prompt_embeds is None:
614
- # textual inversion: process multi-vector tokens if necessary
615
- if isinstance(self, TextualInversionLoaderMixin):
616
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
617
-
618
- text_inputs = self.tokenizer(
619
- prompt,
620
- padding="max_length",
621
- max_length=self.tokenizer.model_max_length,
622
- truncation=True,
623
- return_tensors="pt",
624
- )
625
- text_input_ids = text_inputs.input_ids
626
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
627
-
628
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
629
- text_input_ids, untruncated_ids
630
- ):
631
- removed_text = self.tokenizer.batch_decode(
632
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
633
- )
634
- logger.warning(
635
- "The following part of your input was truncated because CLIP can only handle sequences up to"
636
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
637
- )
638
-
639
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
640
- attention_mask = text_inputs.attention_mask.to(device)
641
- else:
642
- attention_mask = None
643
-
644
- if clip_skip is None:
645
- prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
646
- prompt_embeds = prompt_embeds[0]
647
- else:
648
- prompt_embeds = self.text_encoder(
649
- text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
650
- )
651
- # Access the `hidden_states` first, which contains a tuple of
652
- # all the hidden states from the encoder layers. Then index into
653
- # the tuple to access the hidden states from the desired layer.
654
- prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
655
- # We also need to apply the final LayerNorm here to not mess with the
656
- # representations. The `last_hidden_states` that we typically use for
657
- # obtaining the final prompt representations passes through the LayerNorm
658
- # layer.
659
- prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
660
-
661
- if self.text_encoder is not None:
662
- prompt_embeds_dtype = self.text_encoder.dtype
663
- elif self.unet is not None:
664
- prompt_embeds_dtype = self.unet.dtype
665
- else:
666
- prompt_embeds_dtype = prompt_embeds.dtype
667
-
668
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
669
-
670
- bs_embed, seq_len, _ = prompt_embeds.shape
671
- # duplicate text embeddings for each generation per prompt, using mps friendly method
672
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
673
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
674
-
675
- # get unconditional embeddings for classifier free guidance
676
- if do_classifier_free_guidance and negative_prompt_embeds is None:
677
- uncond_tokens: List[str]
678
- if negative_prompt is None:
679
- uncond_tokens = [""] * batch_size
680
- elif prompt is not None and type(prompt) is not type(negative_prompt):
681
- raise TypeError(
682
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
683
- f" {type(prompt)}."
684
- )
685
- elif isinstance(negative_prompt, str):
686
- uncond_tokens = [negative_prompt]
687
- elif batch_size != len(negative_prompt):
688
- raise ValueError(
689
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
690
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
691
- " the batch size of `prompt`."
692
- )
693
- else:
694
- uncond_tokens = negative_prompt
695
-
696
- # textual inversion: process multi-vector tokens if necessary
697
- if isinstance(self, TextualInversionLoaderMixin):
698
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
699
-
700
- max_length = prompt_embeds.shape[1]
701
- uncond_input = self.tokenizer(
702
- uncond_tokens,
703
- padding="max_length",
704
- max_length=max_length,
705
- truncation=True,
706
- return_tensors="pt",
707
- )
708
-
709
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
710
- attention_mask = uncond_input.attention_mask.to(device)
711
- else:
712
- attention_mask = None
713
-
714
- negative_prompt_embeds = self.text_encoder(
715
- uncond_input.input_ids.to(device),
716
- attention_mask=attention_mask,
717
- )
718
- negative_prompt_embeds = negative_prompt_embeds[0]
719
-
720
- if do_classifier_free_guidance:
721
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
722
- seq_len = negative_prompt_embeds.shape[1]
723
-
724
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
725
-
726
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
727
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
728
-
729
- if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
730
- # Retrieve the original scale by scaling back the LoRA layers
731
- unscale_lora_layers(self.text_encoder, lora_scale)
732
-
733
- return text_inputs, prompt_embeds, negative_prompt_embeds
734
-
735
- def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
736
- dtype = next(self.image_encoder.parameters()).dtype
737
-
738
- if not isinstance(image, torch.Tensor):
739
- image = self.feature_extractor(image, return_tensors="pt").pixel_values
740
-
741
- image = image.to(device=device, dtype=dtype)
742
- if output_hidden_states:
743
- image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
744
- image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
745
- uncond_image_enc_hidden_states = self.image_encoder(
746
- torch.zeros_like(image), output_hidden_states=True
747
- ).hidden_states[-2]
748
- uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
749
- num_images_per_prompt, dim=0
750
- )
751
- return image_enc_hidden_states, uncond_image_enc_hidden_states
752
- else:
753
- image_embeds = self.image_encoder(image).image_embeds
754
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
755
- uncond_image_embeds = torch.zeros_like(image_embeds)
756
-
757
- return image_embeds, uncond_image_embeds
758
-
759
- def run_safety_checker(self, image, device, dtype):
760
- if self.safety_checker is None:
761
- has_nsfw_concept = None
762
- else:
763
- if torch.is_tensor(image):
764
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
765
- else:
766
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
767
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
768
- image, has_nsfw_concept = self.safety_checker(
769
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
770
- )
771
- return image, has_nsfw_concept
772
-
773
- def decode_latents(self, latents):
774
- deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
775
- deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
776
-
777
- latents = 1 / self.vae.config.scaling_factor * latents
778
- image = self.vae.decode(latents, return_dict=False)[0]
779
- image = (image / 2 + 0.5).clamp(0, 1)
780
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
781
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
782
- return image
783
-
784
- def prepare_extra_step_kwargs(self, generator, eta):
785
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
786
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
787
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
788
- # and should be between [0, 1]
789
-
790
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
791
- extra_step_kwargs = {}
792
- if accepts_eta:
793
- extra_step_kwargs["eta"] = eta
794
-
795
- # check if the scheduler accepts generator
796
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
797
- if accepts_generator:
798
- extra_step_kwargs["generator"] = generator
799
- return extra_step_kwargs
800
-
801
- def check_inputs(
802
- self,
803
- prompt,
804
- height,
805
- width,
806
- boxdiff_phrases,
807
- boxdiff_boxes,
808
- callback_steps,
809
- negative_prompt=None,
810
- prompt_embeds=None,
811
- negative_prompt_embeds=None,
812
- callback_on_step_end_tensor_inputs=None,
813
- ):
814
- if height % 8 != 0 or width % 8 != 0:
815
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
816
-
817
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
818
- raise ValueError(
819
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
820
- f" {type(callback_steps)}."
821
- )
822
- if callback_on_step_end_tensor_inputs is not None and not all(
823
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
824
- ):
825
- raise ValueError(
826
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
827
- )
828
-
829
- if prompt is not None and prompt_embeds is not None:
830
- raise ValueError(
831
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
832
- " only forward one of the two."
833
- )
834
- elif prompt is None and prompt_embeds is None:
835
- raise ValueError(
836
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
837
- )
838
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
839
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
840
-
841
- if negative_prompt is not None and negative_prompt_embeds is not None:
842
- raise ValueError(
843
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
844
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
845
- )
846
-
847
- if prompt_embeds is not None and negative_prompt_embeds is not None:
848
- if prompt_embeds.shape != negative_prompt_embeds.shape:
849
- raise ValueError(
850
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
851
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
852
- f" {negative_prompt_embeds.shape}."
853
- )
854
-
855
- if boxdiff_phrases is not None or boxdiff_boxes is not None:
856
- if not (boxdiff_phrases is not None and boxdiff_boxes is not None):
857
- raise ValueError("Either both `boxdiff_phrases` and `boxdiff_boxes` must be passed or none of them.")
858
-
859
- if not isinstance(boxdiff_phrases, list) or not isinstance(boxdiff_boxes, list):
860
- raise ValueError("`boxdiff_phrases` and `boxdiff_boxes` must be lists.")
861
-
862
- if len(boxdiff_phrases) != len(boxdiff_boxes):
863
- raise ValueError(
864
- "`boxdiff_phrases` and `boxdiff_boxes` must have the same length,"
865
- f" got: `boxdiff_phrases` {len(boxdiff_phrases)} != `boxdiff_boxes`"
866
- f" {len(boxdiff_boxes)}."
867
- )
868
-
869
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
870
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
871
- if isinstance(generator, list) and len(generator) != batch_size:
872
- raise ValueError(
873
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
874
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
875
- )
876
-
877
- if latents is None:
878
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
879
- else:
880
- latents = latents.to(device)
881
-
882
- # scale the initial noise by the standard deviation required by the scheduler
883
- latents = latents * self.scheduler.init_noise_sigma
884
- return latents
885
-
886
- def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
887
- r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
888
-
889
- The suffixes after the scaling factors represent the stages where they are being applied.
890
-
891
- Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
892
- that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
893
-
894
- Args:
895
- s1 (`float`):
896
- Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
897
- mitigate "oversmoothing effect" in the enhanced denoising process.
898
- s2 (`float`):
899
- Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
900
- mitigate "oversmoothing effect" in the enhanced denoising process.
901
- b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
902
- b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
903
- """
904
- if not hasattr(self, "unet"):
905
- raise ValueError("The pipeline must have `unet` for using FreeU.")
906
- self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
907
-
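A short usage sketch for the FreeU toggles above; the `s`/`b` values are the ones commonly suggested for SD v1.x in the FreeU repository and are an assumption, not something this file prescribes:

```py
import torch
from diffusers import DiffusionPipeline

# Hypothetical usage sketch: toggle FreeU on a pipeline exposing the helpers above.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Scaling factors reported upstream for SD v1.x (an assumption, not from this file).
pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6)
image = pipe("a photo of an astronaut riding a horse on mars").images[0]

pipe.disable_freeu()  # restore the default skip/backbone scaling
```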
908
- def disable_freeu(self):
909
- """Disables the FreeU mechanism if enabled."""
910
- self.unet.disable_freeu()
911
-
912
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
913
- def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
914
- """
915
- Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
916
- key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
917
-
918
- <Tip warning={true}>
919
-
920
- This API is 🧪 experimental.
921
-
922
- </Tip>
923
-
924
- Args:
925
- unet (`bool`, defaults to `True`): To apply fusion on the UNet.
926
- vae (`bool`, defaults to `True`): To apply fusion on the VAE.
927
- """
928
- self.fusing_unet = False
929
- self.fusing_vae = False
930
-
931
- if unet:
932
- self.fusing_unet = True
933
- self.unet.fuse_qkv_projections()
934
- self.unet.set_attn_processor(FusedAttnProcessor2_0())
935
-
936
- if vae:
937
- if not isinstance(self.vae, AutoencoderKL):
938
- raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
939
-
940
- self.fusing_vae = True
941
- self.vae.fuse_qkv_projections()
942
- self.vae.set_attn_processor(FusedAttnProcessor2_0())
943
-
944
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
945
- def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
946
- """Disable QKV projection fusion if enabled.
947
-
948
- <Tip warning={true}>
949
-
950
- This API is 🧪 experimental.
951
-
952
- </Tip>
953
-
954
- Args:
955
- unet (`bool`, defaults to `True`): To apply fusion on the UNet.
956
- vae (`bool`, defaults to `True`): To apply fusion on the VAE.
957
-
958
- """
959
- if unet:
960
- if not self.fusing_unet:
961
- logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
962
- else:
963
- self.unet.unfuse_qkv_projections()
964
- self.fusing_unet = False
965
-
966
- if vae:
967
- if not self.fusing_vae:
968
- logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
969
- else:
970
- self.vae.unfuse_qkv_projections()
971
- self.fusing_vae = False
972
-
973
- # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
974
- def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
975
- """
976
- See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
977
-
978
- Args:
979
- w (`torch.Tensor`):
980
- guidance scale values at which to generate embedding vectors
981
- embedding_dim (`int`, *optional*, defaults to 512):
982
- dimension of the embeddings to generate
983
- dtype:
984
- data type of the generated embeddings
985
-
986
- Returns:
987
- `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
988
- """
989
- assert len(w.shape) == 1
990
- w = w * 1000.0
991
-
992
- half_dim = embedding_dim // 2
993
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
994
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
995
- emb = w.to(dtype)[:, None] * emb[None, :]
996
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
997
- if embedding_dim % 2 == 1: # zero pad
998
- emb = torch.nn.functional.pad(emb, (0, 1))
999
- assert emb.shape == (w.shape[0], embedding_dim)
1000
- return emb
1001
-
1002
- @property
1003
- def guidance_scale(self):
1004
- return self._guidance_scale
1005
-
1006
- @property
1007
- def guidance_rescale(self):
1008
- return self._guidance_rescale
1009
-
1010
- @property
1011
- def clip_skip(self):
1012
- return self._clip_skip
1013
-
1014
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
1015
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1016
- # corresponds to doing no classifier free guidance.
1017
- @property
1018
- def do_classifier_free_guidance(self):
1019
- return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
1020
-
1021
- @property
1022
- def cross_attention_kwargs(self):
1023
- return self._cross_attention_kwargs
1024
-
1025
- @property
1026
- def num_timesteps(self):
1027
- return self._num_timesteps
1028
-
1029
- @property
1030
- def interrupt(self):
1031
- return self._interrupt
1032
-
1033
- def _compute_max_attention_per_index(
1034
- self,
1035
- attention_maps: torch.Tensor,
1036
- indices_to_alter: List[int],
1037
- smooth_attentions: bool = False,
1038
- sigma: float = 0.5,
1039
- kernel_size: int = 3,
1040
- normalize_eot: bool = False,
1041
- bboxes: List[int] = None,
1042
- L: int = 1,
1043
- P: float = 0.2,
1044
- ) -> List[torch.Tensor]:
1045
- """Computes the maximum attention value for each of the tokens we wish to alter."""
1046
- last_idx = -1
1047
- if normalize_eot:
1048
- prompt = self.prompt
1049
- if isinstance(self.prompt, list):
1050
- prompt = self.prompt[0]
1051
- last_idx = len(self.tokenizer(prompt)["input_ids"]) - 1
1052
- attention_for_text = attention_maps[:, :, 1:last_idx]
1053
- attention_for_text *= 100
1054
- attention_for_text = torch.nn.functional.softmax(attention_for_text, dim=-1)
1055
-
1056
- # Shift indices since we removed the first token "1:last_idx"
1057
- indices_to_alter = [index - 1 for index in indices_to_alter]
1058
-
1059
- # Extract the maximum values
1060
- max_indices_list_fg = []
1061
- max_indices_list_bg = []
1062
- dist_x = []
1063
- dist_y = []
1064
-
1065
- cnt = 0
1066
- for i in indices_to_alter:
1067
- image = attention_for_text[:, :, i]
1068
-
1069
- # TODO
1070
- # box = [max(round(b / (512 / image.shape[0])), 0) for b in bboxes[cnt]]
1071
- # x1, y1, x2, y2 = box
1072
- H, W = image.shape
1073
- x1 = min(max(round(bboxes[cnt][0] * W), 0), W)
1074
- y1 = min(max(round(bboxes[cnt][1] * H), 0), H)
1075
- x2 = min(max(round(bboxes[cnt][2] * W), 0), W)
1076
- y2 = min(max(round(bboxes[cnt][3] * H), 0), H)
1077
- box = [x1, y1, x2, y2]
1078
- cnt += 1
1079
-
1080
- # coordinates to masks
1081
- obj_mask = torch.zeros_like(image)
1082
- ones_mask = torch.ones([y2 - y1, x2 - x1], dtype=obj_mask.dtype).to(obj_mask.device)
1083
- obj_mask[y1:y2, x1:x2] = ones_mask
1084
- bg_mask = 1 - obj_mask
1085
-
1086
- if smooth_attentions:
1087
- smoothing = GaussianSmoothing(channels=1, kernel_size=kernel_size, sigma=sigma, dim=2).to(image.device)
1088
- input = F.pad(image.unsqueeze(0).unsqueeze(0), (1, 1, 1, 1), mode="reflect")
1089
- image = smoothing(input).squeeze(0).squeeze(0)
1090
-
1091
- # Inner-Box constraint
1092
- k = (obj_mask.sum() * P).long()
1093
- max_indices_list_fg.append((image * obj_mask).reshape(-1).topk(k)[0].mean())
1094
-
1095
- # Outer-Box constraint
1096
- k = (bg_mask.sum() * P).long()
1097
- max_indices_list_bg.append((image * bg_mask).reshape(-1).topk(k)[0].mean())
1098
-
1099
- # Corner Constraint
1100
- gt_proj_x = torch.max(obj_mask, dim=0)[0]
1101
- gt_proj_y = torch.max(obj_mask, dim=1)[0]
1102
- corner_mask_x = torch.zeros_like(gt_proj_x)
1103
- corner_mask_y = torch.zeros_like(gt_proj_y)
1104
-
1105
- # create gt according to the number config.L
1106
- N = gt_proj_x.shape[0]
1107
- corner_mask_x[max(box[0] - L, 0) : min(box[0] + L + 1, N)] = 1.0
1108
- corner_mask_x[max(box[2] - L, 0) : min(box[2] + L + 1, N)] = 1.0
1109
- corner_mask_y[max(box[1] - L, 0) : min(box[1] + L + 1, N)] = 1.0
1110
- corner_mask_y[max(box[3] - L, 0) : min(box[3] + L + 1, N)] = 1.0
1111
- dist_x.append((F.l1_loss(image.max(dim=0)[0], gt_proj_x, reduction="none") * corner_mask_x).mean())
1112
- dist_y.append((F.l1_loss(image.max(dim=1)[0], gt_proj_y, reduction="none") * corner_mask_y).mean())
1113
-
1114
- return max_indices_list_fg, max_indices_list_bg, dist_x, dist_y
1115
-
1116
- def _aggregate_and_get_max_attention_per_token(
1117
- self,
1118
- attention_store: AttentionStore,
1119
- indices_to_alter: List[int],
1120
- attention_res: int = 16,
1121
- smooth_attentions: bool = False,
1122
- sigma: float = 0.5,
1123
- kernel_size: int = 3,
1124
- normalize_eot: bool = False,
1125
- bboxes: List[int] = None,
1126
- L: int = 1,
1127
- P: float = 0.2,
1128
- ):
1129
- """Aggregates the attention for each token and computes the max activation value for each token to alter."""
1130
- attention_maps = aggregate_attention(
1131
- attention_store=attention_store,
1132
- res=attention_res,
1133
- from_where=("up", "down", "mid"),
1134
- is_cross=True,
1135
- select=0,
1136
- )
1137
- max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y = self._compute_max_attention_per_index(
1138
- attention_maps=attention_maps,
1139
- indices_to_alter=indices_to_alter,
1140
- smooth_attentions=smooth_attentions,
1141
- sigma=sigma,
1142
- kernel_size=kernel_size,
1143
- normalize_eot=normalize_eot,
1144
- bboxes=bboxes,
1145
- L=L,
1146
- P=P,
1147
- )
1148
- return max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y
1149
-
1150
- @staticmethod
1151
- def _compute_loss(
1152
- max_attention_per_index_fg: List[torch.Tensor],
1153
- max_attention_per_index_bg: List[torch.Tensor],
1154
- dist_x: List[torch.Tensor],
1155
- dist_y: List[torch.Tensor],
1156
- return_losses: bool = False,
1157
- ) -> torch.Tensor:
1158
- """Computes the attend-and-excite loss using the maximum attention value for each token."""
1159
- losses_fg = [max(0, 1.0 - curr_max) for curr_max in max_attention_per_index_fg]
1160
- losses_bg = [max(0, curr_max) for curr_max in max_attention_per_index_bg]
1161
- loss = sum(losses_fg) + sum(losses_bg) + sum(dist_x) + sum(dist_y)
1162
- if return_losses:
1163
- return max(losses_fg), losses_fg
1164
- else:
1165
- return max(losses_fg), loss
1166
-
1167
- @staticmethod
1168
- def _update_latent(latents: torch.Tensor, loss: torch.Tensor, step_size: float) -> torch.Tensor:
1169
- """Update the latent according to the computed loss."""
1170
- grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents], retain_graph=True)[0]
1171
- latents = latents - step_size * grad_cond
1172
- return latents
1173
-
1174
- def _perform_iterative_refinement_step(
1175
- self,
1176
- latents: torch.Tensor,
1177
- indices_to_alter: List[int],
1178
- loss_fg: torch.Tensor,
1179
- threshold: float,
1180
- text_embeddings: torch.Tensor,
1181
- text_input,
1182
- attention_store: AttentionStore,
1183
- step_size: float,
1184
- t: int,
1185
- attention_res: int = 16,
1186
- smooth_attentions: bool = True,
1187
- sigma: float = 0.5,
1188
- kernel_size: int = 3,
1189
- max_refinement_steps: int = 20,
1190
- normalize_eot: bool = False,
1191
- bboxes: List[int] = None,
1192
- L: int = 1,
1193
- P: float = 0.2,
1194
- ):
1195
- """
1196
- Performs the iterative latent refinement introduced in the paper. Here, we continuously update the latent
1197
- code according to our loss objective until the given threshold is reached for all tokens.
1198
- """
1199
- iteration = 0
1200
- target_loss = max(0, 1.0 - threshold)
1201
-
1202
- while loss_fg > target_loss:
1203
- iteration += 1
1204
-
1205
- latents = latents.clone().detach().requires_grad_(True)
1206
- # noise_pred_text = self.unet(latents, t, encoder_hidden_states=text_embeddings[1].unsqueeze(0)).sample
1207
- self.unet.zero_grad()
1208
-
1209
- # Get max activation value for each subject token
1210
- (
1211
- max_attention_per_index_fg,
1212
- max_attention_per_index_bg,
1213
- dist_x,
1214
- dist_y,
1215
- ) = self._aggregate_and_get_max_attention_per_token(
1216
- attention_store=attention_store,
1217
- indices_to_alter=indices_to_alter,
1218
- attention_res=attention_res,
1219
- smooth_attentions=smooth_attentions,
1220
- sigma=sigma,
1221
- kernel_size=kernel_size,
1222
- normalize_eot=normalize_eot,
1223
- bboxes=bboxes,
1224
- L=L,
1225
- P=P,
1226
- )
1227
-
1228
- loss_fg, losses_fg = self._compute_loss(
1229
- max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y, return_losses=True
1230
- )
1231
-
1232
- if loss_fg != 0:
1233
- latents = self._update_latent(latents, loss_fg, step_size)
1234
-
1235
- # with torch.no_grad():
1236
- # noise_pred_uncond = self.unet(latents, t, encoder_hidden_states=text_embeddings[0].unsqueeze(0)).sample
1237
- # noise_pred_text = self.unet(latents, t, encoder_hidden_states=text_embeddings[1].unsqueeze(0)).sample
1238
-
1239
- # try:
1240
- # low_token = np.argmax([l.item() if not isinstance(l, int) else l for l in losses_fg])
1241
- # except Exception as e:
1242
- # print(e) # catch edge case :)
1243
- # low_token = np.argmax(losses_fg)
1244
-
1245
- # low_word = self.tokenizer.decode(text_input.input_ids[0][indices_to_alter[low_token]])
1246
- # print(f'\t Try {iteration}. {low_word} has a max attention of {max_attention_per_index_fg[low_token]}')
1247
-
1248
- if iteration >= max_refinement_steps:
1249
- # print(f'\t Exceeded max number of iterations ({max_refinement_steps})! '
1250
- # f'Finished with a max attention of {max_attention_per_index_fg[low_token]}')
1251
- break
1252
-
1253
- # Run one more time but don't compute gradients and update the latents.
1254
- # We just need to compute the new loss - the grad update will occur below
1255
- latents = latents.clone().detach().requires_grad_(True)
1256
- # noise_pred_text = self.unet(latents, t, encoder_hidden_states=text_embeddings[1].unsqueeze(0)).sample
1257
- self.unet.zero_grad()
1258
-
1259
- # Get max activation value for each subject token
1260
- (
1261
- max_attention_per_index_fg,
1262
- max_attention_per_index_bg,
1263
- dist_x,
1264
- dist_y,
1265
- ) = self._aggregate_and_get_max_attention_per_token(
1266
- attention_store=attention_store,
1267
- indices_to_alter=indices_to_alter,
1268
- attention_res=attention_res,
1269
- smooth_attentions=smooth_attentions,
1270
- sigma=sigma,
1271
- kernel_size=kernel_size,
1272
- normalize_eot=normalize_eot,
1273
- bboxes=bboxes,
1274
- L=L,
1275
- P=P,
1276
- )
1277
- loss_fg, losses_fg = self._compute_loss(
1278
- max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y, return_losses=True
1279
- )
1280
- # print(f"\t Finished with loss of: {loss_fg}")
1281
- return loss_fg, latents, max_attention_per_index_fg
1282
-
1283
- @torch.no_grad()
1284
- @replace_example_docstring(EXAMPLE_DOC_STRING)
1285
- def __call__(
1286
- self,
1287
- prompt: Union[str, List[str]] = None,
1288
- boxdiff_phrases: List[str] = None,
1289
- boxdiff_boxes: List[List[float]] = None, # TODO
1290
- boxdiff_kwargs: Optional[Dict[str, Any]] = {
1291
- "attention_res": 16,
1292
- "P": 0.2,
1293
- "L": 1,
1294
- "max_iter_to_alter": 25,
1295
- "loss_thresholds": {0: 0.05, 10: 0.5, 20: 0.8},
1296
- "scale_factor": 20,
1297
- "scale_range": (1.0, 0.5),
1298
- "smooth_attentions": True,
1299
- "sigma": 0.5,
1300
- "kernel_size": 3,
1301
- "refine": False,
1302
- "normalize_eot": True,
1303
- },
1304
- height: Optional[int] = None,
1305
- width: Optional[int] = None,
1306
- num_inference_steps: int = 50,
1307
- timesteps: List[int] = None,
1308
- guidance_scale: float = 7.5,
1309
- negative_prompt: Optional[Union[str, List[str]]] = None,
1310
- num_images_per_prompt: Optional[int] = 1,
1311
- eta: float = 0.0,
1312
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1313
- latents: Optional[torch.FloatTensor] = None,
1314
- prompt_embeds: Optional[torch.FloatTensor] = None,
1315
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1316
- ip_adapter_image: Optional[PipelineImageInput] = None,
1317
- output_type: Optional[str] = "pil",
1318
- return_dict: bool = True,
1319
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1320
- guidance_rescale: float = 0.0,
1321
- clip_skip: Optional[int] = None,
1322
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
1323
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1324
- **kwargs,
1325
- ):
1326
- r"""
1327
- The call function to the pipeline for generation.
1328
-
1329
- Args:
1330
- prompt (`str` or `List[str]`, *optional*):
1331
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
1332
-
1333
- boxdiff_attention_res (`int`, *optional*, defaults to 16):
1334
- The resolution of the attention maps used for computing the BoxDiff loss.
1335
- boxdiff_P (`float`, *optional*, defaults to 0.2):
1336
- The fraction of pixels inside (or outside) each box whose top attention values are averaged when computing the BoxDiff inner- and outer-box constraints.
1337
- boxdiff_L (`int`, *optional*, defaults to 1):
1338
- The number of pixels around the corner to be selected in BoxDiff loss.
1339
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1340
- The height in pixels of the generated image.
1341
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1342
- The width in pixels of the generated image.
1343
- num_inference_steps (`int`, *optional*, defaults to 50):
1344
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1345
- expense of slower inference.
1346
- timesteps (`List[int]`, *optional*):
1347
- Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
1348
- in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
1349
- passed will be used. Must be in descending order.
1350
- guidance_scale (`float`, *optional*, defaults to 7.5):
1351
- A higher guidance scale value encourages the model to generate images closely linked to the text
1352
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
1353
- negative_prompt (`str` or `List[str]`, *optional*):
1354
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
1355
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
1356
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1357
- The number of images to generate per prompt.
1358
- eta (`float`, *optional*, defaults to 0.0):
1359
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
1360
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
1361
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1362
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
1363
- generation deterministic.
1364
- latents (`torch.FloatTensor`, *optional*):
1365
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
1366
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1367
- tensor is generated by sampling using the supplied random `generator`.
1368
- prompt_embeds (`torch.FloatTensor`, *optional*):
1369
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
1370
- provided, text embeddings are generated from the `prompt` input argument.
1371
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1372
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
1373
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
1374
- ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1375
- output_type (`str`, *optional*, defaults to `"pil"`):
1376
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
1377
- return_dict (`bool`, *optional*, defaults to `True`):
1378
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1379
- plain tuple.
1380
- cross_attention_kwargs (`dict`, *optional*):
1381
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
1382
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1383
- guidance_rescale (`float`, *optional*, defaults to 0.0):
1384
- Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
1385
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
1386
- using zero terminal SNR.
1387
- clip_skip (`int`, *optional*):
1388
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1389
- the output of the pre-final layer will be used for computing the prompt embeddings.
1390
- callback_on_step_end (`Callable`, *optional*):
1391
- A function called at the end of each denoising step during inference. It is invoked
1392
- with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1393
- callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1394
- `callback_on_step_end_tensor_inputs`.
1395
- callback_on_step_end_tensor_inputs (`List`, *optional*):
1396
- The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1397
- will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1398
- `._callback_tensor_inputs` attribute of your pipeline class.
1399
-
1400
- Examples:
1401
-
1402
- Returns:
1403
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1404
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
1405
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
1406
- second element is a list of `bool`s indicating whether the corresponding generated image contains
1407
- "not-safe-for-work" (nsfw) content.
1408
- """
1409
-
1410
- callback = kwargs.pop("callback", None)
1411
- callback_steps = kwargs.pop("callback_steps", None)
1412
-
1413
- if callback is not None:
1414
- deprecate(
1415
- "callback",
1416
- "1.0.0",
1417
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1418
- )
1419
- if callback_steps is not None:
1420
- deprecate(
1421
- "callback_steps",
1422
- "1.0.0",
1423
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1424
- )
1425
-
1426
- # -1. Register attention control (for BoxDiff)
1427
- attention_store = AttentionStore()
1428
- register_attention_control(self, attention_store)
1429
-
1430
- # 0. Default height and width to unet
1431
- height = height or self.unet.config.sample_size * self.vae_scale_factor
1432
- width = width or self.unet.config.sample_size * self.vae_scale_factor
1433
- # to deal with lora scaling and other possible forward hooks
1434
-
1435
- # 1. Check inputs. Raise error if not correct
1436
- self.check_inputs(
1437
- prompt,
1438
- height,
1439
- width,
1440
- boxdiff_phrases,
1441
- boxdiff_boxes,
1442
- callback_steps,
1443
- negative_prompt,
1444
- prompt_embeds,
1445
- negative_prompt_embeds,
1446
- callback_on_step_end_tensor_inputs,
1447
- )
1448
- self.prompt = prompt
1449
-
1450
- self._guidance_scale = guidance_scale
1451
- self._guidance_rescale = guidance_rescale
1452
- self._clip_skip = clip_skip
1453
- self._cross_attention_kwargs = cross_attention_kwargs
1454
- self._interrupt = False
1455
-
1456
- # 2. Define call parameters
1457
- if prompt is not None and isinstance(prompt, str):
1458
- batch_size = 1
1459
- elif prompt is not None and isinstance(prompt, list):
1460
- batch_size = len(prompt)
1461
- else:
1462
- batch_size = prompt_embeds.shape[0]
1463
-
1464
- device = self._execution_device
1465
-
1466
- # 3. Encode input prompt
1467
- lora_scale = (
1468
- self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1469
- )
1470
-
1471
- text_inputs, prompt_embeds, negative_prompt_embeds = self.encode_prompt(
1472
- prompt,
1473
- device,
1474
- num_images_per_prompt,
1475
- self.do_classifier_free_guidance,
1476
- negative_prompt,
1477
- prompt_embeds=prompt_embeds,
1478
- negative_prompt_embeds=negative_prompt_embeds,
1479
- lora_scale=lora_scale,
1480
- clip_skip=self.clip_skip,
1481
- )
1482
-
1483
- # For classifier free guidance, we need to do two forward passes.
1484
- # Here we concatenate the unconditional and text embeddings into a single batch
1485
- # to avoid doing two forward passes
1486
- if self.do_classifier_free_guidance:
1487
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
1488
-
1489
- if ip_adapter_image is not None:
1490
- output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
1491
- image_embeds, negative_image_embeds = self.encode_image(
1492
- ip_adapter_image, device, num_images_per_prompt, output_hidden_state
1493
- )
1494
- if self.do_classifier_free_guidance:
1495
- image_embeds = torch.cat([negative_image_embeds, image_embeds])
1496
-
1497
- # 4. Prepare timesteps
1498
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
1499
-
1500
- # 5. Prepare latent variables
1501
- num_channels_latents = self.unet.config.in_channels
1502
- latents = self.prepare_latents(
1503
- batch_size * num_images_per_prompt,
1504
- num_channels_latents,
1505
- height,
1506
- width,
1507
- prompt_embeds.dtype,
1508
- device,
1509
- generator,
1510
- latents,
1511
- )
1512
-
1513
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1514
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1515
-
1516
- # 6.1 Add image embeds for IP-Adapter
1517
- added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
1518
-
1519
- # 6.2 Optionally get Guidance Scale Embedding
1520
- timestep_cond = None
1521
- if self.unet.config.time_cond_proj_dim is not None:
1522
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1523
- timestep_cond = self.get_guidance_scale_embedding(
1524
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1525
- ).to(device=device, dtype=latents.dtype)
1526
-
1527
- # 6.3 Prepare BoxDiff inputs
1528
- # a) Indices to alter
1529
- input_ids = self.tokenizer(prompt)["input_ids"]
1530
- decoded = [self.tokenizer.decode([t]) for t in input_ids]
1531
- indices_to_alter = []
1532
- bboxes = []
1533
- for phrase, box in zip(boxdiff_phrases, boxdiff_boxes):
1534
- # it can happen that a phrase does not correspond to a single token
1535
- if phrase not in decoded:
1536
- continue
1537
- indices_to_alter.append(decoded.index(phrase))
1538
- bboxes.append(box)
1539
-
1540
- # b) A bunch of hyperparameters
1541
- attention_res = boxdiff_kwargs.get("attention_res", 16)
1542
- smooth_attentions = boxdiff_kwargs.get("smooth_attentions", True)
1543
- sigma = boxdiff_kwargs.get("sigma", 0.5)
1544
- kernel_size = boxdiff_kwargs.get("kernel_size", 3)
1545
- L = boxdiff_kwargs.get("L", 1)
1546
- P = boxdiff_kwargs.get("P", 0.2)
1547
- thresholds = boxdiff_kwargs.get("loss_thresholds", {0: 0.05, 10: 0.5, 20: 0.8})
1548
- max_iter_to_alter = boxdiff_kwargs.get("max_iter_to_alter", len(self.scheduler.timesteps) + 1)
1549
- scale_factor = boxdiff_kwargs.get("scale_factor", 20)
1550
- refine = boxdiff_kwargs.get("refine", False)
1551
- normalize_eot = boxdiff_kwargs.get("normalize_eot", True)
1552
-
1553
- scale_range = boxdiff_kwargs.get("scale_range", (1.0, 0.5))
1554
- scale_range = np.linspace(scale_range[0], scale_range[1], len(self.scheduler.timesteps))
1555
-
1556
- # 7. Denoising loop
1557
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1558
- self._num_timesteps = len(timesteps)
1559
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1560
- for i, t in enumerate(timesteps):
1561
- if self.interrupt:
1562
- continue
1563
-
1564
- # BoxDiff optimization
1565
- with torch.enable_grad():
1566
- latents = latents.clone().detach().requires_grad_(True)
1567
-
1568
- # Forward pass of denoising with text conditioning
1569
- noise_pred_text = self.unet(
1570
- latents,
1571
- t,
1572
- encoder_hidden_states=prompt_embeds[1].unsqueeze(0),
1573
- cross_attention_kwargs=cross_attention_kwargs,
1574
- ).sample
1575
- self.unet.zero_grad()
1576
-
1577
- # Get max activation value for each subject token
1578
- (
1579
- max_attention_per_index_fg,
1580
- max_attention_per_index_bg,
1581
- dist_x,
1582
- dist_y,
1583
- ) = self._aggregate_and_get_max_attention_per_token(
1584
- attention_store=attention_store,
1585
- indices_to_alter=indices_to_alter,
1586
- attention_res=attention_res,
1587
- smooth_attentions=smooth_attentions,
1588
- sigma=sigma,
1589
- kernel_size=kernel_size,
1590
- normalize_eot=normalize_eot,
1591
- bboxes=bboxes,
1592
- L=L,
1593
- P=P,
1594
- )
1595
-
1596
- loss_fg, loss = self._compute_loss(
1597
- max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y
1598
- )
1599
-
1600
- # Refinement from attend-and-excite (not necessary)
1601
- if refine and i in thresholds.keys() and loss_fg > 1.0 - thresholds[i]:
1602
- del noise_pred_text
1603
- torch.cuda.empty_cache()
1604
- loss_fg, latents, max_attention_per_index_fg = self._perform_iterative_refinement_step(
1605
- latents=latents,
1606
- indices_to_alter=indices_to_alter,
1607
- loss_fg=loss_fg,
1608
- threshold=thresholds[i],
1609
- text_embeddings=prompt_embeds,
1610
- text_input=text_inputs,
1611
- attention_store=attention_store,
1612
- step_size=scale_factor * np.sqrt(scale_range[i]),
1613
- t=t,
1614
- attention_res=attention_res,
1615
- smooth_attentions=smooth_attentions,
1616
- sigma=sigma,
1617
- kernel_size=kernel_size,
1618
- normalize_eot=normalize_eot,
1619
- bboxes=bboxes,
1620
- L=L,
1621
- P=P,
1622
- )
1623
-
1624
- # Perform gradient update
1625
- if i < max_iter_to_alter:
1626
- _, loss = self._compute_loss(
1627
- max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y
1628
- )
1629
- if loss != 0:
1630
- latents = self._update_latent(
1631
- latents=latents, loss=loss, step_size=scale_factor * np.sqrt(scale_range[i])
1632
- )
1633
-
1634
- # expand the latents if we are doing classifier free guidance
1635
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1636
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1637
-
1638
- # predict the noise residual
1639
- noise_pred = self.unet(
1640
- latent_model_input,
1641
- t,
1642
- encoder_hidden_states=prompt_embeds,
1643
- timestep_cond=timestep_cond,
1644
- cross_attention_kwargs=self.cross_attention_kwargs,
1645
- added_cond_kwargs=added_cond_kwargs,
1646
- return_dict=False,
1647
- )[0]
1648
-
1649
- # perform guidance
1650
- if self.do_classifier_free_guidance:
1651
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1652
- noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1653
-
1654
- if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
1655
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1656
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
1657
-
1658
- # compute the previous noisy sample x_t -> x_t-1
1659
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1660
-
1661
- if callback_on_step_end is not None:
1662
- callback_kwargs = {}
1663
- for k in callback_on_step_end_tensor_inputs:
1664
- callback_kwargs[k] = locals()[k]
1665
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1666
-
1667
- latents = callback_outputs.pop("latents", latents)
1668
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1669
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1670
-
1671
- # call the callback, if provided
1672
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1673
- progress_bar.update()
1674
- if callback is not None and i % callback_steps == 0:
1675
- step_idx = i // getattr(self.scheduler, "order", 1)
1676
- callback(step_idx, t, latents)
1677
-
1678
- if not output_type == "latent":
1679
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1680
- 0
1681
- ]
1682
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1683
- else:
1684
- image = latents
1685
- has_nsfw_concept = None
1686
-
1687
- if has_nsfw_concept is None:
1688
- do_denormalize = [True] * image.shape[0]
1689
- else:
1690
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1691
-
1692
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1693
-
1694
- # Offload all models
1695
- self.maybe_free_model_hooks()
1696
-
1697
- if not return_dict:
1698
- return (image, has_nsfw_concept)
1699
-
1700
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
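For context on the interface defined above, a minimal usage sketch of this BoxDiff pipeline. The `custom_pipeline` identifier, checkpoint and phrase/box pairing are illustrative assumptions; boxes are normalized `[x1, y1, x2, y2]` coordinates in `[0, 1]`, as implied by the coordinate scaling in `_compute_max_attention_per_index`:

```py
import torch
from diffusers import DiffusionPipeline

# Hypothetical usage sketch: load the community BoxDiff pipeline (assuming the
# custom pipeline file is still resolvable under this name) and tie one phrase to a box.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="pipeline_stable_diffusion_boxdiff",
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt="a photo of an astronaut riding a horse on mars",
    boxdiff_phrases=["astronaut"],         # one phrase per box
    boxdiff_boxes=[[0.1, 0.2, 0.5, 0.8]],  # normalized [x1, y1, x2, y2]
    num_inference_steps=50,
    guidance_scale=7.5,
).images[0]
```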
 
pipeline_stable_diffusion_pag.py DELETED
@@ -1,1471 +0,0 @@
1
- # Implementation of StableDiffusionPipeline with PAG
2
- # https://ku-cvlab.github.io/Perturbed-Attention-Guidance
3
-
4
- import inspect
5
- from typing import Any, Callable, Dict, List, Optional, Union
6
-
7
- import torch
8
- import torch.nn.functional as F
9
- from packaging import version
10
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
11
-
12
- from diffusers.configuration_utils import FrozenDict
13
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
14
- from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
15
- from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
16
- from diffusers.models.attention_processor import Attention, AttnProcessor2_0, FusedAttnProcessor2_0
17
- from diffusers.models.lora import adjust_lora_scale_text_encoder
18
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline
19
- from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
20
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
21
- from diffusers.schedulers import KarrasDiffusionSchedulers
22
- from diffusers.utils import (
23
- USE_PEFT_BACKEND,
24
- deprecate,
25
- logging,
26
- replace_example_docstring,
27
- scale_lora_layers,
28
- unscale_lora_layers,
29
- )
30
- from diffusers.utils.torch_utils import randn_tensor
31
-
32
-
33
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
34
-
35
- EXAMPLE_DOC_STRING = """
36
- Examples:
37
- ```py
38
- >>> import torch
39
- >>> from diffusers import StableDiffusionPipeline
40
- >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
41
- >>> pipe = pipe.to("cuda")
42
- >>> prompt = "a photo of an astronaut riding a horse on mars"
43
- >>> image = pipe(prompt).images[0]
44
- ```
45
- """
46
-
47
-
48
- class PAGIdentitySelfAttnProcessor:
49
- r"""
50
- Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
51
- """
52
-
53
- def __init__(self):
54
- if not hasattr(F, "scaled_dot_product_attention"):
55
- raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
56
-
57
- def __call__(
58
- self,
59
- attn: Attention,
60
- hidden_states: torch.Tensor,
61
- encoder_hidden_states: Optional[torch.Tensor] = None,
62
- attention_mask: Optional[torch.Tensor] = None,
63
- temb: Optional[torch.Tensor] = None,
64
- *args,
65
- **kwargs,
66
- ) -> torch.Tensor:
67
- if len(args) > 0 or kwargs.get("scale", None) is not None:
68
- deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
69
- deprecate("scale", "1.0.0", deprecation_message)
70
-
71
- residual = hidden_states
72
- if attn.spatial_norm is not None:
73
- hidden_states = attn.spatial_norm(hidden_states, temb)
74
-
75
- input_ndim = hidden_states.ndim
76
- if input_ndim == 4:
77
- batch_size, channel, height, width = hidden_states.shape
78
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
79
-
80
- # chunk
81
- hidden_states_org, hidden_states_ptb = hidden_states.chunk(2)
82
-
83
- # original path
84
- batch_size, sequence_length, _ = hidden_states_org.shape
85
-
86
- if attention_mask is not None:
87
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
88
- # scaled_dot_product_attention expects attention_mask shape to be
89
- # (batch, heads, source_length, target_length)
90
- attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
91
-
92
- if attn.group_norm is not None:
93
- hidden_states_org = attn.group_norm(hidden_states_org.transpose(1, 2)).transpose(1, 2)
94
-
95
- query = attn.to_q(hidden_states_org)
96
- key = attn.to_k(hidden_states_org)
97
- value = attn.to_v(hidden_states_org)
98
-
99
- inner_dim = key.shape[-1]
100
- head_dim = inner_dim // attn.heads
101
-
102
- query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
103
-
104
- key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
105
- value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
106
-
107
- # the output of sdp = (batch, num_heads, seq_len, head_dim)
108
- # TODO: add support for attn.scale when we move to Torch 2.1
109
- hidden_states_org = F.scaled_dot_product_attention(
110
- query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
111
- )
112
-
113
- hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
114
- hidden_states_org = hidden_states_org.to(query.dtype)
115
-
116
- # linear proj
117
- hidden_states_org = attn.to_out[0](hidden_states_org)
118
- # dropout
119
- hidden_states_org = attn.to_out[1](hidden_states_org)
120
-
121
- if input_ndim == 4:
122
- hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width)
123
-
124
- # perturbed path (identity attention)
125
- batch_size, sequence_length, _ = hidden_states_ptb.shape
126
-
127
- if attention_mask is not None:
128
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
129
- # scaled_dot_product_attention expects attention_mask shape to be
130
- # (batch, heads, source_length, target_length)
131
- attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
132
-
133
- if attn.group_norm is not None:
134
- hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2)
135
-
136
- value = attn.to_v(hidden_states_ptb)
137
-
138
- # hidden_states_ptb = torch.zeros(value.shape).to(value.get_device())
139
- hidden_states_ptb = value
140
-
141
- hidden_states_ptb = hidden_states_ptb.to(query.dtype)
142
-
143
- # linear proj
144
- hidden_states_ptb = attn.to_out[0](hidden_states_ptb)
145
- # dropout
146
- hidden_states_ptb = attn.to_out[1](hidden_states_ptb)
147
-
148
- if input_ndim == 4:
149
- hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width)
150
-
151
- # cat
152
- hidden_states = torch.cat([hidden_states_org, hidden_states_ptb])
153
-
154
- if attn.residual_connection:
155
- hidden_states = hidden_states + residual
156
-
157
- hidden_states = hidden_states / attn.rescale_output_factor
158
-
159
- return hidden_states
160
-
161
-
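The processor above implements the perturbed branch of perturbed-attention guidance by replacing the self-attention map with the identity, so the attention output collapses to the value projection, while the original branch runs normal scaled dot-product attention. A minimal standalone sketch of that contrast in plain PyTorch, independent of the diffusers `Attention` module (the tensor shapes are made up for illustration):

```py
import torch
import torch.nn.functional as F

def standard_self_attention(q, k, v):
    # the original path: softmax(QK^T / sqrt(d)) V
    return F.scaled_dot_product_attention(q, k, v)

def identity_self_attention(q, k, v):
    # the perturbed path: an identity attention map means every token
    # attends only to itself, so the output is just the value tensor
    return v

# hypothetical shapes: (batch, heads, seq_len, head_dim)
q = k = v = torch.randn(1, 8, 16, 64)
print(standard_self_attention(q, k, v).shape)  # torch.Size([1, 8, 16, 64])
print(identity_self_attention(q, k, v).shape)  # torch.Size([1, 8, 16, 64])
```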
162
- class PAGCFGIdentitySelfAttnProcessor:
163
- r"""
164
- Processor implementing the identity (perturbed) self-attention path of perturbed-attention guidance combined with classifier-free guidance, using scaled dot-product attention (requires PyTorch 2.0).
165
- """
166
-
167
- def __init__(self):
168
- if not hasattr(F, "scaled_dot_product_attention"):
169
- raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
170
-
171
- def __call__(
172
- self,
173
- attn: Attention,
174
- hidden_states: torch.Tensor,
175
- encoder_hidden_states: Optional[torch.Tensor] = None,
176
- attention_mask: Optional[torch.Tensor] = None,
177
- temb: Optional[torch.Tensor] = None,
178
- *args,
179
- **kwargs,
180
- ) -> torch.Tensor:
181
- if len(args) > 0 or kwargs.get("scale", None) is not None:
182
- deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
183
- deprecate("scale", "1.0.0", deprecation_message)
184
-
185
- residual = hidden_states
186
- if attn.spatial_norm is not None:
187
- hidden_states = attn.spatial_norm(hidden_states, temb)
188
-
189
- input_ndim = hidden_states.ndim
190
- if input_ndim == 4:
191
- batch_size, channel, height, width = hidden_states.shape
192
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
193
-
194
- # chunk
195
- hidden_states_uncond, hidden_states_org, hidden_states_ptb = hidden_states.chunk(3)
196
- hidden_states_org = torch.cat([hidden_states_uncond, hidden_states_org])
197
-
198
- # original path
199
- batch_size, sequence_length, _ = hidden_states_org.shape
200
-
201
- if attention_mask is not None:
202
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
203
- # scaled_dot_product_attention expects attention_mask shape to be
204
- # (batch, heads, source_length, target_length)
205
- attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
206
-
207
- if attn.group_norm is not None:
208
- hidden_states_org = attn.group_norm(hidden_states_org.transpose(1, 2)).transpose(1, 2)
209
-
210
- query = attn.to_q(hidden_states_org)
211
- key = attn.to_k(hidden_states_org)
212
- value = attn.to_v(hidden_states_org)
213
-
214
- inner_dim = key.shape[-1]
215
- head_dim = inner_dim // attn.heads
216
-
217
- query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
218
-
219
- key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
220
- value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
221
-
222
- # the output of sdp = (batch, num_heads, seq_len, head_dim)
223
- # TODO: add support for attn.scale when we move to Torch 2.1
224
- hidden_states_org = F.scaled_dot_product_attention(
225
- query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
226
- )
227
-
228
- hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
229
- hidden_states_org = hidden_states_org.to(query.dtype)
230
-
231
- # linear proj
232
- hidden_states_org = attn.to_out[0](hidden_states_org)
233
- # dropout
234
- hidden_states_org = attn.to_out[1](hidden_states_org)
235
-
236
- if input_ndim == 4:
237
- hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width)
238
-
239
- # perturbed path (identity attention)
240
- batch_size, sequence_length, _ = hidden_states_ptb.shape
241
-
242
- if attention_mask is not None:
243
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
244
- # scaled_dot_product_attention expects attention_mask shape to be
245
- # (batch, heads, source_length, target_length)
246
- attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
247
-
248
- if attn.group_norm is not None:
249
- hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2)
250
-
251
- value = attn.to_v(hidden_states_ptb)
252
- hidden_states_ptb = value
253
- hidden_states_ptb = hidden_states_ptb.to(query.dtype)
254
-
255
- # linear proj
256
- hidden_states_ptb = attn.to_out[0](hidden_states_ptb)
257
- # dropout
258
- hidden_states_ptb = attn.to_out[1](hidden_states_ptb)
259
-
260
- if input_ndim == 4:
261
- hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width)
262
-
263
- # cat
264
- hidden_states = torch.cat([hidden_states_org, hidden_states_ptb])
265
-
266
- if attn.residual_connection:
267
- hidden_states = hidden_states + residual
268
-
269
- hidden_states = hidden_states / attn.rescale_output_factor
270
-
271
- return hidden_states
272
-
273
-
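With classifier-free guidance and PAG active at the same time, the hidden states arrive stacked three times along the batch dimension (unconditional, conditional, conditional-perturbed), which is why this processor chunks into three and only routes the last chunk through the identity path. A small shape-only sketch of that batch layout (placeholder tensors):

```py
import torch

latents = torch.randn(2, 4, 64, 64)        # a hypothetical batch of two latents
batched = torch.cat([latents] * 3)         # [uncond | cond | cond-perturbed]

uncond, cond, cond_ptb = batched.chunk(3)  # what the CFG + PAG processor receives
assert uncond.shape == cond.shape == cond_ptb.shape == latents.shape
```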
274
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
275
- """
276
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
277
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
278
- """
279
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
280
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
281
- # rescale the results from guidance (fixes overexposure)
282
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
283
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
284
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
285
- return noise_cfg
286
-
287
-
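`rescale_noise_cfg` matches the standard deviation of the guided prediction to that of the text-conditioned prediction and then blends the rescaled and original predictions by `guidance_rescale`. A quick numerical sanity check with random tensors (the function is restated only to make the snippet self-contained):

```py
import torch

def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * rescaled + (1 - guidance_rescale) * noise_cfg

noise_pred_text = torch.randn(1, 4, 8, 8)
noise_cfg = 7.5 * torch.randn(1, 4, 8, 8)   # over-amplified by a large guidance scale
out = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0)
print(out.std().item(), noise_pred_text.std().item())  # the two standard deviations now agree
```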
288
- def retrieve_timesteps(
289
- scheduler,
290
- num_inference_steps: Optional[int] = None,
291
- device: Optional[Union[str, torch.device]] = None,
292
- timesteps: Optional[List[int]] = None,
293
- **kwargs,
294
- ):
295
- """
296
- Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
297
- custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
298
- Args:
299
- scheduler (`SchedulerMixin`):
300
- The scheduler to get timesteps from.
301
- num_inference_steps (`int`):
302
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
303
- `timesteps` must be `None`.
304
- device (`str` or `torch.device`, *optional*):
305
- The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
306
- timesteps (`List[int]`, *optional*):
307
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
308
- timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
309
- must be `None`.
310
- Returns:
311
- `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
312
- second element is the number of inference steps.
313
- """
314
- if timesteps is not None:
315
- accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
316
- if not accepts_timesteps:
317
- raise ValueError(
318
- f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
319
- f" timestep schedules. Please check whether you are using the correct scheduler."
320
- )
321
- scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
322
- timesteps = scheduler.timesteps
323
- num_inference_steps = len(timesteps)
324
- else:
325
- scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
326
- timesteps = scheduler.timesteps
327
- return timesteps, num_inference_steps
328
-
329
-
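`retrieve_timesteps` above is a thin convenience wrapper: it calls the scheduler's `set_timesteps` and reads back `scheduler.timesteps`, raising only when an explicit timestep list is requested from a scheduler whose `set_timesteps` cannot accept one. A hedged usage sketch of the default branch with a stock `DDIMScheduler` (assumes `diffusers` is installed; the step count is arbitrary):

```py
from diffusers import DDIMScheduler

scheduler = DDIMScheduler()                # default 1000-step training schedule
scheduler.set_timesteps(25, device="cpu")  # what retrieve_timesteps does when timesteps is None
timesteps = scheduler.timesteps            # descending tensor with 25 entries
print(len(timesteps), timesteps[0].item(), timesteps[-1].item())
```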
330
- class StableDiffusionPAGPipeline(
331
- DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
332
- ):
333
- r"""
334
- Pipeline for text-to-image generation using Stable Diffusion with perturbed-attention guidance (PAG).
335
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
336
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
337
- The pipeline also inherits the following loading methods:
338
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
339
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
340
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
341
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
342
- - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
343
- Args:
344
- vae ([`AutoencoderKL`]):
345
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
346
- text_encoder ([`~transformers.CLIPTextModel`]):
347
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
348
- tokenizer ([`~transformers.CLIPTokenizer`]):
349
- A `CLIPTokenizer` to tokenize text.
350
- unet ([`UNet2DConditionModel`]):
351
- A `UNet2DConditionModel` to denoise the encoded image latents.
352
- scheduler ([`SchedulerMixin`]):
353
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
354
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
355
- safety_checker ([`StableDiffusionSafetyChecker`]):
356
- Classification module that estimates whether generated images could be considered offensive or harmful.
357
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
358
- about a model's potential harms.
359
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
360
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
361
- """
362
-
363
- model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
364
- _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
365
- _exclude_from_cpu_offload = ["safety_checker"]
366
- _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
367
-
368
- def __init__(
369
- self,
370
- vae: AutoencoderKL,
371
- text_encoder: CLIPTextModel,
372
- tokenizer: CLIPTokenizer,
373
- unet: UNet2DConditionModel,
374
- scheduler: KarrasDiffusionSchedulers,
375
- safety_checker: StableDiffusionSafetyChecker,
376
- feature_extractor: CLIPImageProcessor,
377
- image_encoder: CLIPVisionModelWithProjection = None,
378
- requires_safety_checker: bool = True,
379
- ):
380
- super().__init__()
381
-
382
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
383
- deprecation_message = (
384
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
385
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
386
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
387
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
388
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
389
- " file"
390
- )
391
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
392
- new_config = dict(scheduler.config)
393
- new_config["steps_offset"] = 1
394
- scheduler._internal_dict = FrozenDict(new_config)
395
-
396
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
397
- deprecation_message = (
398
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
399
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
400
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
401
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
402
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
403
- )
404
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
405
- new_config = dict(scheduler.config)
406
- new_config["clip_sample"] = False
407
- scheduler._internal_dict = FrozenDict(new_config)
408
-
409
- if safety_checker is None and requires_safety_checker:
410
- logger.warning(
411
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
412
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
413
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
414
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
415
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
416
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
417
- )
418
-
419
- if safety_checker is not None and feature_extractor is None:
420
- raise ValueError(
421
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
422
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
423
- )
424
-
425
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
426
- version.parse(unet.config._diffusers_version).base_version
427
- ) < version.parse("0.9.0.dev0")
428
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
429
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
430
- deprecation_message = (
431
- "The configuration file of the unet has set the default `sample_size` to smaller than"
432
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
433
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
434
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
435
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
436
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
437
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
438
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
439
- " the `unet/config.json` file"
440
- )
441
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
442
- new_config = dict(unet.config)
443
- new_config["sample_size"] = 64
444
- unet._internal_dict = FrozenDict(new_config)
445
-
446
- self.register_modules(
447
- vae=vae,
448
- text_encoder=text_encoder,
449
- tokenizer=tokenizer,
450
- unet=unet,
451
- scheduler=scheduler,
452
- safety_checker=safety_checker,
453
- feature_extractor=feature_extractor,
454
- image_encoder=image_encoder,
455
- )
456
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
457
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
458
- self.register_to_config(requires_safety_checker=requires_safety_checker)
459
-
460
- def enable_vae_slicing(self):
461
- r"""
462
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
463
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
464
- """
465
- self.vae.enable_slicing()
466
-
467
- def disable_vae_slicing(self):
468
- r"""
469
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
470
- computing decoding in one step.
471
- """
472
- self.vae.disable_slicing()
473
-
474
- def enable_vae_tiling(self):
475
- r"""
476
- Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
477
- compute decoding and encoding in several steps. This is useful to save a large amount of memory and to allow
478
- processing larger images.
479
- """
480
- self.vae.enable_tiling()
481
-
482
- def disable_vae_tiling(self):
483
- r"""
484
- Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
485
- computing decoding in one step.
486
- """
487
- self.vae.disable_tiling()
488
-
489
- def _encode_prompt(
490
- self,
491
- prompt,
492
- device,
493
- num_images_per_prompt,
494
- do_classifier_free_guidance,
495
- negative_prompt=None,
496
- prompt_embeds: Optional[torch.Tensor] = None,
497
- negative_prompt_embeds: Optional[torch.Tensor] = None,
498
- lora_scale: Optional[float] = None,
499
- **kwargs,
500
- ):
501
- deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
502
- deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
503
-
504
- prompt_embeds_tuple = self.encode_prompt(
505
- prompt=prompt,
506
- device=device,
507
- num_images_per_prompt=num_images_per_prompt,
508
- do_classifier_free_guidance=do_classifier_free_guidance,
509
- negative_prompt=negative_prompt,
510
- prompt_embeds=prompt_embeds,
511
- negative_prompt_embeds=negative_prompt_embeds,
512
- lora_scale=lora_scale,
513
- **kwargs,
514
- )
515
-
516
- # concatenate for backwards compatibility
517
- prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
518
-
519
- return prompt_embeds
520
-
521
- def encode_prompt(
522
- self,
523
- prompt,
524
- device,
525
- num_images_per_prompt,
526
- do_classifier_free_guidance,
527
- negative_prompt=None,
528
- prompt_embeds: Optional[torch.Tensor] = None,
529
- negative_prompt_embeds: Optional[torch.Tensor] = None,
530
- lora_scale: Optional[float] = None,
531
- clip_skip: Optional[int] = None,
532
- ):
533
- r"""
534
- Encodes the prompt into text encoder hidden states.
535
- Args:
536
- prompt (`str` or `List[str]`, *optional*):
537
- prompt to be encoded
538
- device: (`torch.device`):
539
- torch device
540
- num_images_per_prompt (`int`):
541
- number of images that should be generated per prompt
542
- do_classifier_free_guidance (`bool`):
543
- whether to use classifier free guidance or not
544
- negative_prompt (`str` or `List[str]`, *optional*):
545
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
546
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
547
- less than `1`).
548
- prompt_embeds (`torch.Tensor`, *optional*):
549
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
550
- provided, text embeddings will be generated from `prompt` input argument.
551
- negative_prompt_embeds (`torch.Tensor`, *optional*):
552
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
553
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
554
- argument.
555
- lora_scale (`float`, *optional*):
556
- A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
557
- clip_skip (`int`, *optional*):
558
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
559
- the output of the pre-final layer will be used for computing the prompt embeddings.
560
- """
561
- # set lora scale so that monkey patched LoRA
562
- # function of text encoder can correctly access it
563
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
564
- self._lora_scale = lora_scale
565
-
566
- # dynamically adjust the LoRA scale
567
- if not USE_PEFT_BACKEND:
568
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
569
- else:
570
- scale_lora_layers(self.text_encoder, lora_scale)
571
-
572
- if prompt is not None and isinstance(prompt, str):
573
- batch_size = 1
574
- elif prompt is not None and isinstance(prompt, list):
575
- batch_size = len(prompt)
576
- else:
577
- batch_size = prompt_embeds.shape[0]
578
-
579
- if prompt_embeds is None:
580
- # textual inversion: process multi-vector tokens if necessary
581
- if isinstance(self, TextualInversionLoaderMixin):
582
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
583
-
584
- text_inputs = self.tokenizer(
585
- prompt,
586
- padding="max_length",
587
- max_length=self.tokenizer.model_max_length,
588
- truncation=True,
589
- return_tensors="pt",
590
- )
591
- text_input_ids = text_inputs.input_ids
592
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
593
-
594
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
595
- text_input_ids, untruncated_ids
596
- ):
597
- removed_text = self.tokenizer.batch_decode(
598
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
599
- )
600
- logger.warning(
601
- "The following part of your input was truncated because CLIP can only handle sequences up to"
602
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
603
- )
604
-
605
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
606
- attention_mask = text_inputs.attention_mask.to(device)
607
- else:
608
- attention_mask = None
609
-
610
- if clip_skip is None:
611
- prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
612
- prompt_embeds = prompt_embeds[0]
613
- else:
614
- prompt_embeds = self.text_encoder(
615
- text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
616
- )
617
- # Access the `hidden_states` first, that contains a tuple of
618
- # all the hidden states from the encoder layers. Then index into
619
- # the tuple to access the hidden states from the desired layer.
620
- prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
621
- # We also need to apply the final LayerNorm here to not mess with the
622
- # representations. The `last_hidden_states` that we typically use for
623
- # obtaining the final prompt representations passes through the LayerNorm
624
- # layer.
625
- prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
626
-
627
- if self.text_encoder is not None:
628
- prompt_embeds_dtype = self.text_encoder.dtype
629
- elif self.unet is not None:
630
- prompt_embeds_dtype = self.unet.dtype
631
- else:
632
- prompt_embeds_dtype = prompt_embeds.dtype
633
-
634
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
635
-
636
- bs_embed, seq_len, _ = prompt_embeds.shape
637
- # duplicate text embeddings for each generation per prompt, using mps friendly method
638
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
639
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
640
-
641
- # get unconditional embeddings for classifier free guidance
642
- if do_classifier_free_guidance and negative_prompt_embeds is None:
643
- uncond_tokens: List[str]
644
- if negative_prompt is None:
645
- uncond_tokens = [""] * batch_size
646
- elif prompt is not None and type(prompt) is not type(negative_prompt):
647
- raise TypeError(
648
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
649
- f" {type(prompt)}."
650
- )
651
- elif isinstance(negative_prompt, str):
652
- uncond_tokens = [negative_prompt]
653
- elif batch_size != len(negative_prompt):
654
- raise ValueError(
655
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
656
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
657
- " the batch size of `prompt`."
658
- )
659
- else:
660
- uncond_tokens = negative_prompt
661
-
662
- # textual inversion: process multi-vector tokens if necessary
663
- if isinstance(self, TextualInversionLoaderMixin):
664
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
665
-
666
- max_length = prompt_embeds.shape[1]
667
- uncond_input = self.tokenizer(
668
- uncond_tokens,
669
- padding="max_length",
670
- max_length=max_length,
671
- truncation=True,
672
- return_tensors="pt",
673
- )
674
-
675
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
676
- attention_mask = uncond_input.attention_mask.to(device)
677
- else:
678
- attention_mask = None
679
-
680
- negative_prompt_embeds = self.text_encoder(
681
- uncond_input.input_ids.to(device),
682
- attention_mask=attention_mask,
683
- )
684
- negative_prompt_embeds = negative_prompt_embeds[0]
685
-
686
- if do_classifier_free_guidance:
687
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
688
- seq_len = negative_prompt_embeds.shape[1]
689
-
690
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
691
-
692
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
693
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
694
-
695
- if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
696
- # Retrieve the original scale by scaling back the LoRA layers
697
- unscale_lora_layers(self.text_encoder, lora_scale)
698
-
699
- return prompt_embeds, negative_prompt_embeds
700
-
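One small but easy-to-miss mechanism in `encode_prompt` is how a single prompt embedding is duplicated for `num_images_per_prompt` via `repeat` followed by `view` (the mps-friendly route mentioned in the comments), which keeps the copies for each prompt grouped together. A shape-only illustration with made-up dimensions:

```py
import torch

bs_embed, seq_len, dim = 2, 77, 768        # hypothetical CLIP text-encoder output shape
num_images_per_prompt = 3
prompt_embeds = torch.randn(bs_embed, seq_len, dim)

# duplicate each prompt's embedding, keeping the copies of one prompt adjacent
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
print(prompt_embeds.shape)  # torch.Size([6, 77, 768])
```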
701
- def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
702
- dtype = next(self.image_encoder.parameters()).dtype
703
-
704
- if not isinstance(image, torch.Tensor):
705
- image = self.feature_extractor(image, return_tensors="pt").pixel_values
706
-
707
- image = image.to(device=device, dtype=dtype)
708
- if output_hidden_states:
709
- image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
710
- image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
711
- uncond_image_enc_hidden_states = self.image_encoder(
712
- torch.zeros_like(image), output_hidden_states=True
713
- ).hidden_states[-2]
714
- uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
715
- num_images_per_prompt, dim=0
716
- )
717
- return image_enc_hidden_states, uncond_image_enc_hidden_states
718
- else:
719
- image_embeds = self.image_encoder(image).image_embeds
720
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
721
- uncond_image_embeds = torch.zeros_like(image_embeds)
722
-
723
- return image_embeds, uncond_image_embeds
724
-
725
- def prepare_ip_adapter_image_embeds(
726
- self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt
727
- ):
728
- if ip_adapter_image_embeds is None:
729
- if not isinstance(ip_adapter_image, list):
730
- ip_adapter_image = [ip_adapter_image]
731
-
732
- if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
733
- raise ValueError(
734
- f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
735
- )
736
-
737
- image_embeds = []
738
- for single_ip_adapter_image, image_proj_layer in zip(
739
- ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
740
- ):
741
- output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
742
- single_image_embeds, single_negative_image_embeds = self.encode_image(
743
- single_ip_adapter_image, device, 1, output_hidden_state
744
- )
745
- single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
746
- single_negative_image_embeds = torch.stack(
747
- [single_negative_image_embeds] * num_images_per_prompt, dim=0
748
- )
749
-
750
- if self.do_classifier_free_guidance:
751
- single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
752
- single_image_embeds = single_image_embeds.to(device)
753
-
754
- image_embeds.append(single_image_embeds)
755
- else:
756
- image_embeds = ip_adapter_image_embeds
757
- return image_embeds
758
-
759
- def run_safety_checker(self, image, device, dtype):
760
- if self.safety_checker is None:
761
- has_nsfw_concept = None
762
- else:
763
- if torch.is_tensor(image):
764
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
765
- else:
766
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
767
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
768
- image, has_nsfw_concept = self.safety_checker(
769
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
770
- )
771
- return image, has_nsfw_concept
772
-
773
- def decode_latents(self, latents):
774
- deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
775
- deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
776
-
777
- latents = 1 / self.vae.config.scaling_factor * latents
778
- image = self.vae.decode(latents, return_dict=False)[0]
779
- image = (image / 2 + 0.5).clamp(0, 1)
780
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
781
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
782
- return image
783
-
784
- def prepare_extra_step_kwargs(self, generator, eta):
785
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
786
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
787
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
788
- # and should be between [0, 1]
789
-
790
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
791
- extra_step_kwargs = {}
792
- if accepts_eta:
793
- extra_step_kwargs["eta"] = eta
794
-
795
- # check if the scheduler accepts generator
796
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
797
- if accepts_generator:
798
- extra_step_kwargs["generator"] = generator
799
- return extra_step_kwargs
800
-
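`prepare_extra_step_kwargs` only forwards `eta` and `generator` to schedulers whose `step` signature actually accepts them. The same `inspect`-based probing can be reproduced in isolation (assumes `diffusers` is installed; the two schedulers are just examples):

```py
import inspect
from diffusers import DDIMScheduler, EulerDiscreteScheduler

for scheduler_cls in (DDIMScheduler, EulerDiscreteScheduler):
    params = set(inspect.signature(scheduler_cls.step).parameters.keys())
    print(scheduler_cls.__name__, "accepts eta:", "eta" in params)
# expected: DDIMScheduler accepts eta, EulerDiscreteScheduler does not
```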
801
- def check_inputs(
802
- self,
803
- prompt,
804
- height,
805
- width,
806
- callback_steps,
807
- negative_prompt=None,
808
- prompt_embeds=None,
809
- negative_prompt_embeds=None,
810
- ip_adapter_image=None,
811
- ip_adapter_image_embeds=None,
812
- callback_on_step_end_tensor_inputs=None,
813
- ):
814
- if height % 8 != 0 or width % 8 != 0:
815
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
816
-
817
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
818
- raise ValueError(
819
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
820
- f" {type(callback_steps)}."
821
- )
822
- if callback_on_step_end_tensor_inputs is not None and not all(
823
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
824
- ):
825
- raise ValueError(
826
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
827
- )
828
-
829
- if prompt is not None and prompt_embeds is not None:
830
- raise ValueError(
831
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
832
- " only forward one of the two."
833
- )
834
- elif prompt is None and prompt_embeds is None:
835
- raise ValueError(
836
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
837
- )
838
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
839
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
840
-
841
- if negative_prompt is not None and negative_prompt_embeds is not None:
842
- raise ValueError(
843
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
844
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
845
- )
846
-
847
- if prompt_embeds is not None and negative_prompt_embeds is not None:
848
- if prompt_embeds.shape != negative_prompt_embeds.shape:
849
- raise ValueError(
850
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
851
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
852
- f" {negative_prompt_embeds.shape}."
853
- )
854
-
855
- if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
856
- raise ValueError(
857
- "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
858
- )
859
-
860
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
861
- shape = (
862
- batch_size,
863
- num_channels_latents,
864
- int(height) // self.vae_scale_factor,
865
- int(width) // self.vae_scale_factor,
866
- )
867
- if isinstance(generator, list) and len(generator) != batch_size:
868
- raise ValueError(
869
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
870
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
871
- )
872
-
873
- if latents is None:
874
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
875
- else:
876
- latents = latents.to(device)
877
-
878
- # scale the initial noise by the standard deviation required by the scheduler
879
- latents = latents * self.scheduler.init_noise_sigma
880
- return latents
881
-
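`prepare_latents` allocates noise in the VAE's latent space, i.e. the spatial size is the pixel size divided by `vae_scale_factor`, and then scales by the scheduler's `init_noise_sigma`. A sketch of that shape arithmetic with typical SD 1.x numbers (all concrete values here are assumptions for illustration):

```py
import torch
from diffusers.utils.torch_utils import randn_tensor

batch_size, num_channels_latents = 1, 4
height, width, vae_scale_factor = 512, 512, 8  # assumed SD 1.x-style settings
init_noise_sigma = 1.0                         # scheduler-dependent; 1.0 for DDIM/PNDM-like schedulers

shape = (batch_size, num_channels_latents, height // vae_scale_factor, width // vae_scale_factor)
generator = torch.Generator("cpu").manual_seed(0)
latents = randn_tensor(shape, generator=generator, device=torch.device("cpu"), dtype=torch.float32)
latents = latents * init_noise_sigma
print(latents.shape)  # torch.Size([1, 4, 64, 64])
```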
882
- def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
883
- r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
884
- The suffixes after the scaling factors represent the stages where they are being applied.
885
- Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
886
- that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
887
- Args:
888
- s1 (`float`):
889
- Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
890
- mitigate "oversmoothing effect" in the enhanced denoising process.
891
- s2 (`float`):
892
- Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
893
- mitigate "oversmoothing effect" in the enhanced denoising process.
894
- b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
895
- b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
896
- """
897
- if not hasattr(self, "unet"):
898
- raise ValueError("The pipeline must have `unet` for using FreeU.")
899
- self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
900
-
901
- def disable_freeu(self):
902
- """Disables the FreeU mechanism if enabled."""
903
- self.unet.disable_freeu()
904
-
905
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
906
- def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
907
- """
908
- Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
909
- key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
910
- <Tip warning={true}>
911
- This API is 🧪 experimental.
912
- </Tip>
913
- Args:
914
- unet (`bool`, defaults to `True`): To apply fusion on the UNet.
915
- vae (`bool`, defaults to `True`): To apply fusion on the VAE.
916
- """
917
- self.fusing_unet = False
918
- self.fusing_vae = False
919
-
920
- if unet:
921
- self.fusing_unet = True
922
- self.unet.fuse_qkv_projections()
923
- self.unet.set_attn_processor(FusedAttnProcessor2_0())
924
-
925
- if vae:
926
- if not isinstance(self.vae, AutoencoderKL):
927
- raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
928
-
929
- self.fusing_vae = True
930
- self.vae.fuse_qkv_projections()
931
- self.vae.set_attn_processor(FusedAttnProcessor2_0())
932
-
933
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
934
- def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
935
- """Disable QKV projection fusion if enabled.
936
- <Tip warning={true}>
937
- This API is 🧪 experimental.
938
- </Tip>
939
- Args:
940
- unet (`bool`, defaults to `True`): To apply fusion on the UNet.
941
- vae (`bool`, defaults to `True`): To apply fusion on the VAE.
942
- """
943
- if unet:
944
- if not self.fusing_unet:
945
- logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
946
- else:
947
- self.unet.unfuse_qkv_projections()
948
- self.fusing_unet = False
949
-
950
- if vae:
951
- if not self.fusing_vae:
952
- logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
953
- else:
954
- self.vae.unfuse_qkv_projections()
955
- self.fusing_vae = False
956
-
957
- # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
958
- def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
959
- """
960
- See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
961
- Args:
962
- timesteps (`torch.Tensor`):
963
- generate embedding vectors at these timesteps
964
- embedding_dim (`int`, *optional*, defaults to 512):
965
- dimension of the embeddings to generate
966
- dtype:
967
- data type of the generated embeddings
968
- Returns:
969
- `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`
970
- """
971
- assert len(w.shape) == 1
972
- w = w * 1000.0
973
-
974
- half_dim = embedding_dim // 2
975
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
976
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
977
- emb = w.to(dtype)[:, None] * emb[None, :]
978
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
979
- if embedding_dim % 2 == 1: # zero pad
980
- emb = torch.nn.functional.pad(emb, (0, 1))
981
- assert emb.shape == (w.shape[0], embedding_dim)
982
- return emb
983
-
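`get_guidance_scale_embedding` maps the scalar guidance weight `w` to a sinusoidal embedding of `w * 1000` (half sine channels, half cosine channels, log-spaced frequencies), and is only used when the UNet exposes `time_cond_proj_dim`. A standalone restatement just to show the shape contract:

```py
import torch

def guidance_scale_embedding(w, embedding_dim=512, dtype=torch.float32):
    # w: 1-D tensor of guidance scales; returns a (len(w), embedding_dim) tensor
    w = w * 1000.0
    half_dim = embedding_dim // 2
    emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
    emb = w.to(dtype)[:, None] * emb[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero-pad odd dimensions
        emb = torch.nn.functional.pad(emb, (0, 1))
    return emb

print(guidance_scale_embedding(torch.tensor([6.5]), embedding_dim=256).shape)  # torch.Size([1, 256])
```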
984
- def pred_z0(self, sample, model_output, timestep):
985
- alpha_prod_t = self.scheduler.alphas_cumprod[timestep].to(sample.device)
986
-
987
- beta_prod_t = 1 - alpha_prod_t
988
- if self.scheduler.config.prediction_type == "epsilon":
989
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
990
- elif self.scheduler.config.prediction_type == "sample":
991
- pred_original_sample = model_output
992
- elif self.scheduler.config.prediction_type == "v_prediction":
993
- pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
994
- # predict V
995
- model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
996
- else:
997
- raise ValueError(
998
- f"prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`,"
999
- " or `v_prediction`"
1000
- )
1001
-
1002
- return pred_original_sample
1003
-
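`pred_z0` converts the network output back to an estimate of the clean latent using the standard closed forms: for epsilon-prediction x0 = (x_t - sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_bar_t), for sample-prediction the output already is x0, and for v-prediction x0 = sqrt(alpha_bar_t) * x_t - sqrt(1 - alpha_bar_t) * v. A small synthetic check that the epsilon and v parameterisations recover the same x0:

```py
import torch

alpha_bar_t = torch.tensor(0.7)   # an arbitrary cumulative alpha
x0 = torch.randn(1, 4, 8, 8)
eps = torch.randn(1, 4, 8, 8)

# forward-noise x0, then express the same target as epsilon and as v
x_t = alpha_bar_t.sqrt() * x0 + (1 - alpha_bar_t).sqrt() * eps
v = alpha_bar_t.sqrt() * eps - (1 - alpha_bar_t).sqrt() * x0

x0_from_eps = (x_t - (1 - alpha_bar_t).sqrt() * eps) / alpha_bar_t.sqrt()
x0_from_v = alpha_bar_t.sqrt() * x_t - (1 - alpha_bar_t).sqrt() * v

print(torch.allclose(x0_from_eps, x0, atol=1e-5), torch.allclose(x0_from_v, x0, atol=1e-5))  # True True
```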
1004
- def pred_x0(self, latents, noise_pred, t, generator, device, prompt_embeds, output_type):
1005
- pred_z0 = self.pred_z0(latents, noise_pred, t)
1006
- pred_x0 = self.vae.decode(pred_z0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0]
1007
- pred_x0, ____ = self.run_safety_checker(pred_x0, device, prompt_embeds.dtype)
1008
- do_denormalize = [True] * pred_x0.shape[0]
1009
- pred_x0 = self.image_processor.postprocess(pred_x0, output_type=output_type, do_denormalize=do_denormalize)
1010
-
1011
- return pred_x0
1012
-
1013
- @property
1014
- def guidance_scale(self):
1015
- return self._guidance_scale
1016
-
1017
- @property
1018
- def guidance_rescale(self):
1019
- return self._guidance_rescale
1020
-
1021
- @property
1022
- def clip_skip(self):
1023
- return self._clip_skip
1024
-
1025
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
1026
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1027
- # corresponds to doing no classifier free guidance.
1028
- @property
1029
- def do_classifier_free_guidance(self):
1030
- return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
1031
-
1032
- @property
1033
- def cross_attention_kwargs(self):
1034
- return self._cross_attention_kwargs
1035
-
1036
- @property
1037
- def num_timesteps(self):
1038
- return self._num_timesteps
1039
-
1040
- @property
1041
- def interrupt(self):
1042
- return self._interrupt
1043
-
1044
- @property
1045
- def pag_scale(self):
1046
- return self._pag_scale
1047
-
1048
- @property
1049
- def do_perturbed_attention_guidance(self):
1050
- return self._pag_scale > 0
1051
-
1052
- @property
1053
- def pag_adaptive_scaling(self):
1054
- return self._pag_adaptive_scaling
1055
-
1056
- @property
1057
- def do_pag_adaptive_scaling(self):
1058
- return self._pag_adaptive_scaling > 0
1059
-
1060
- @property
1061
- def pag_applied_layers_index(self):
1062
- return self._pag_applied_layers_index
1063
-
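These flags steer the guidance arithmetic in the denoising loop below: plain classifier-free guidance uses `uncond + guidance_scale * (text - uncond)`, while the PAG-only branch uses `original + signal_scale * (original - perturbed)` with `signal_scale` optionally shrunk towards zero as the timestep decreases. A scalar-level sketch of those two branches as shown later in the loop (all numbers are illustrative only):

```py
import torch

guidance_scale, pag_scale, pag_adaptive_scaling = 7.5, 3.0, 0.001
t = 400                                         # a hypothetical later-stage timestep

noise_uncond, noise_text, noise_text_ptb = torch.randn(3, 1, 4, 8, 8).unbind(0)

# classifier-free guidance branch
noise_cfg = noise_uncond + guidance_scale * (noise_text - noise_uncond)

# PAG-only branch with adaptive scaling
signal_scale = max(pag_scale - pag_adaptive_scaling * (1000 - t), 0.0)
noise_pag = noise_text + signal_scale * (noise_text - noise_text_ptb)

print(noise_cfg.shape, noise_pag.shape, signal_scale)
```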
1064
- @torch.no_grad()
1065
- @replace_example_docstring(EXAMPLE_DOC_STRING)
1066
- def __call__(
1067
- self,
1068
- prompt: Union[str, List[str]] = None,
1069
- height: Optional[int] = None,
1070
- width: Optional[int] = None,
1071
- num_inference_steps: int = 50,
1072
- timesteps: List[int] = None,
1073
- guidance_scale: float = 7.5,
1074
- pag_scale: float = 0.0,
1075
- pag_adaptive_scaling: float = 0.0,
1076
- pag_applied_layers_index: List[str] = ["d4"], # ['d4', 'd5', 'm0']
1077
- negative_prompt: Optional[Union[str, List[str]]] = None,
1078
- num_images_per_prompt: Optional[int] = 1,
1079
- eta: float = 0.0,
1080
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1081
- latents: Optional[torch.Tensor] = None,
1082
- prompt_embeds: Optional[torch.Tensor] = None,
1083
- negative_prompt_embeds: Optional[torch.Tensor] = None,
1084
- ip_adapter_image: Optional[PipelineImageInput] = None,
1085
- ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
1086
- output_type: Optional[str] = "pil",
1087
- return_dict: bool = True,
1088
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1089
- guidance_rescale: float = 0.0,
1090
- clip_skip: Optional[int] = None,
1091
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
1092
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1093
- **kwargs,
1094
- ):
1095
- r"""
1096
- The call function to the pipeline for generation.
1097
- Args:
1098
- prompt (`str` or `List[str]`, *optional*):
1099
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
1100
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1101
- The height in pixels of the generated image.
1102
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1103
- The width in pixels of the generated image.
1104
- num_inference_steps (`int`, *optional*, defaults to 50):
1105
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1106
- expense of slower inference.
1107
- timesteps (`List[int]`, *optional*):
1108
- Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
1109
- in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
1110
- passed will be used. Must be in descending order.
1111
- guidance_scale (`float`, *optional*, defaults to 7.5):
1112
- A higher guidance scale value encourages the model to generate images closely linked to the text
1113
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
1114
- negative_prompt (`str` or `List[str]`, *optional*):
1115
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
1116
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
1117
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1118
- The number of images to generate per prompt.
1119
- eta (`float`, *optional*, defaults to 0.0):
1120
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
1121
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
1122
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1123
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
1124
- generation deterministic.
1125
- latents (`torch.Tensor`, *optional*):
1126
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
1127
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1128
- tensor is generated by sampling using the supplied random `generator`.
1129
- prompt_embeds (`torch.Tensor`, *optional*):
1130
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
1131
- provided, text embeddings are generated from the `prompt` input argument.
1132
- negative_prompt_embeds (`torch.Tensor`, *optional*):
1133
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
1134
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
1135
- ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1136
- ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
1137
- Pre-generated image embeddings for IP-Adapter. If not
1138
- provided, embeddings are computed from the `ip_adapter_image` input argument.
1139
- output_type (`str`, *optional*, defaults to `"pil"`):
1140
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
1141
- return_dict (`bool`, *optional*, defaults to `True`):
1142
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1143
- plain tuple.
1144
- cross_attention_kwargs (`dict`, *optional*):
1145
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
1146
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1147
- guidance_rescale (`float`, *optional*, defaults to 0.0):
1148
- Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
1149
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
1150
- using zero terminal SNR.
1151
- clip_skip (`int`, *optional*):
1152
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1153
- the output of the pre-final layer will be used for computing the prompt embeddings.
1154
- callback_on_step_end (`Callable`, *optional*):
1155
- A function that is called at the end of each denoising step during inference. The function is called
1156
- with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1157
- callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1158
- `callback_on_step_end_tensor_inputs`.
1159
- callback_on_step_end_tensor_inputs (`List`, *optional*):
1160
- The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1161
- will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the
1162
- `._callback_tensor_inputs` attribute of your pipeline class.
1163
- Examples:
1164
- Returns:
1165
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1166
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
1167
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
1168
- second element is a list of `bool`s indicating whether the corresponding generated image contains
1169
- "not-safe-for-work" (nsfw) content.
1170
- """
1171
-
1172
- callback = kwargs.pop("callback", None)
1173
- callback_steps = kwargs.pop("callback_steps", None)
1174
-
1175
- if callback is not None:
1176
- deprecate(
1177
- "callback",
1178
- "1.0.0",
1179
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1180
- )
1181
- if callback_steps is not None:
1182
- deprecate(
1183
- "callback_steps",
1184
- "1.0.0",
1185
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1186
- )
1187
-
1188
- # 0. Default height and width to unet
1189
- height = height or self.unet.config.sample_size * self.vae_scale_factor
1190
- width = width or self.unet.config.sample_size * self.vae_scale_factor
1191
- # to deal with lora scaling and other possible forward hooks
1192
-
1193
- # 1. Check inputs. Raise error if not correct
1194
- self.check_inputs(
1195
- prompt,
1196
- height,
1197
- width,
1198
- callback_steps,
1199
- negative_prompt,
1200
- prompt_embeds,
1201
- negative_prompt_embeds,
1202
- ip_adapter_image,
1203
- ip_adapter_image_embeds,
1204
- callback_on_step_end_tensor_inputs,
1205
- )
1206
-
1207
- self._guidance_scale = guidance_scale
1208
- self._guidance_rescale = guidance_rescale
1209
- self._clip_skip = clip_skip
1210
- self._cross_attention_kwargs = cross_attention_kwargs
1211
- self._interrupt = False
1212
-
1213
- self._pag_scale = pag_scale
1214
- self._pag_adaptive_scaling = pag_adaptive_scaling
1215
- self._pag_applied_layers_index = pag_applied_layers_index
1216
-
1217
- # 2. Define call parameters
1218
- if prompt is not None and isinstance(prompt, str):
1219
- batch_size = 1
1220
- elif prompt is not None and isinstance(prompt, list):
1221
- batch_size = len(prompt)
1222
- else:
1223
- batch_size = prompt_embeds.shape[0]
1224
-
1225
- device = self._execution_device
1226
-
1227
- # 3. Encode input prompt
1228
- lora_scale = (
1229
- self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1230
- )
1231
-
1232
- prompt_embeds, negative_prompt_embeds = self.encode_prompt(
1233
- prompt,
1234
- device,
1235
- num_images_per_prompt,
1236
- self.do_classifier_free_guidance,
1237
- negative_prompt,
1238
- prompt_embeds=prompt_embeds,
1239
- negative_prompt_embeds=negative_prompt_embeds,
1240
- lora_scale=lora_scale,
1241
- clip_skip=self.clip_skip,
1242
- )
1243
-
1244
- # For classifier free guidance, we need to do two forward passes.
1245
- # Here we concatenate the unconditional and text embeddings into a single batch
1246
- # to avoid doing two forward passes
1247
-
1248
- # cfg
1249
- if self.do_classifier_free_guidance and not self.do_perturbed_attention_guidance:
1250
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
1251
- # pag
1252
- elif not self.do_classifier_free_guidance and self.do_perturbed_attention_guidance:
1253
- prompt_embeds = torch.cat([prompt_embeds, prompt_embeds])
1254
- # both
1255
- elif self.do_classifier_free_guidance and self.do_perturbed_attention_guidance:
1256
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, prompt_embeds])
1257
-
1258
- if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1259
- image_embeds = self.prepare_ip_adapter_image_embeds(
1260
- ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt
1261
- )
1262
-
1263
- # 4. Prepare timesteps
1264
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
1265
-
1266
- # 5. Prepare latent variables
1267
- num_channels_latents = self.unet.config.in_channels
1268
- latents = self.prepare_latents(
1269
- batch_size * num_images_per_prompt,
1270
- num_channels_latents,
1271
- height,
1272
- width,
1273
- prompt_embeds.dtype,
1274
- device,
1275
- generator,
1276
- latents,
1277
- )
1278
-
1279
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1280
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1281
-
1282
- # 6.1 Add image embeds for IP-Adapter
1283
- added_cond_kwargs = (
1284
- {"image_embeds": image_embeds}
1285
- if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
1286
- else None
1287
- )
1288
-
1289
- # 6.2 Optionally get Guidance Scale Embedding
1290
- timestep_cond = None
1291
- if self.unet.config.time_cond_proj_dim is not None:
1292
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1293
- timestep_cond = self.get_guidance_scale_embedding(
1294
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1295
- ).to(device=device, dtype=latents.dtype)
1296
-
1297
- # 7. Denoising loop
1298
- if self.do_perturbed_attention_guidance:
1299
- down_layers = []
1300
- mid_layers = []
1301
- up_layers = []
1302
- for name, module in self.unet.named_modules():
1303
- if "attn1" in name and "to" not in name:
1304
- layer_type = name.split(".")[0].split("_")[0]
1305
- if layer_type == "down":
1306
- down_layers.append(module)
1307
- elif layer_type == "mid":
1308
- mid_layers.append(module)
1309
- elif layer_type == "up":
1310
- up_layers.append(module)
1311
- else:
1312
- raise ValueError(f"Invalid layer type: {layer_type}")
1313
-
1314
- # change attention layers in the UNet if using PAG
1315
- if self.do_perturbed_attention_guidance:
1316
- if self.do_classifier_free_guidance:
1317
- replace_processor = PAGCFGIdentitySelfAttnProcessor()
1318
- else:
1319
- replace_processor = PAGIdentitySelfAttnProcessor()
1320
-
1321
- drop_layers = self.pag_applied_layers_index
1322
- for drop_layer in drop_layers:
1323
- try:
1324
- if drop_layer[0] == "d":
1325
- down_layers[int(drop_layer[1])].processor = replace_processor
1326
- elif drop_layer[0] == "m":
1327
- mid_layers[int(drop_layer[1])].processor = replace_processor
1328
- elif drop_layer[0] == "u":
1329
- up_layers[int(drop_layer[1])].processor = replace_processor
1330
- else:
1331
- raise ValueError(f"Invalid layer type: {drop_layer[0]}")
1332
- except IndexError:
1333
- raise ValueError(
1334
- f"Invalid layer index: {drop_layer}. Available layers: {len(down_layers)} down layers, {len(mid_layers)} mid layers, {len(up_layers)} up layers."
1335
- )
1336
-
1337
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1338
- self._num_timesteps = len(timesteps)
1339
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1340
- for i, t in enumerate(timesteps):
1341
- if self.interrupt:
1342
- continue
1343
-
1344
- # cfg
1345
- if self.do_classifier_free_guidance and not self.do_perturbed_attention_guidance:
1346
- latent_model_input = torch.cat([latents] * 2)
1347
- # pag
1348
- elif not self.do_classifier_free_guidance and self.do_perturbed_attention_guidance:
1349
- latent_model_input = torch.cat([latents] * 2)
1350
- # both
1351
- elif self.do_classifier_free_guidance and self.do_perturbed_attention_guidance:
1352
- latent_model_input = torch.cat([latents] * 3)
1353
- # no
1354
- else:
1355
- latent_model_input = latents
1356
-
1357
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1358
-
1359
- # predict the noise residual
1360
- noise_pred = self.unet(
1361
- latent_model_input,
1362
- t,
1363
- encoder_hidden_states=prompt_embeds,
1364
- timestep_cond=timestep_cond,
1365
- cross_attention_kwargs=self.cross_attention_kwargs,
1366
- added_cond_kwargs=added_cond_kwargs,
1367
- return_dict=False,
1368
- )[0]
1369
-
1370
- # perform guidance
1371
-
1372
- # cfg
1373
- if self.do_classifier_free_guidance and not self.do_perturbed_attention_guidance:
1374
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1375
-
1376
- delta = noise_pred_text - noise_pred_uncond
1377
- noise_pred = noise_pred_uncond + self.guidance_scale * delta
1378
-
1379
- # pag
1380
- elif not self.do_classifier_free_guidance and self.do_perturbed_attention_guidance:
1381
- noise_pred_original, noise_pred_perturb = noise_pred.chunk(2)
1382
-
1383
- signal_scale = self.pag_scale
1384
- if self.do_pag_adaptive_scaling:
1385
- signal_scale = self.pag_scale - self.pag_adaptive_scaling * (1000 - t)
1386
- if signal_scale < 0:
1387
- signal_scale = 0
1388
-
1389
- noise_pred = noise_pred_original + signal_scale * (noise_pred_original - noise_pred_perturb)
1390
-
1391
- # both
1392
- elif self.do_classifier_free_guidance and self.do_perturbed_attention_guidance:
1393
- noise_pred_uncond, noise_pred_text, noise_pred_text_perturb = noise_pred.chunk(3)
1394
-
1395
- signal_scale = self.pag_scale
1396
- if self.do_pag_adaptive_scaling:
1397
- signal_scale = self.pag_scale - self.pag_adaptive_scaling * (1000 - t)
1398
- if signal_scale < 0:
1399
- signal_scale = 0
1400
-
1401
- noise_pred = (
1402
- noise_pred_text
1403
- + (self.guidance_scale - 1.0) * (noise_pred_text - noise_pred_uncond)
1404
- + signal_scale * (noise_pred_text - noise_pred_text_perturb)
1405
- )
1406
-
1407
- if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
1408
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1409
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
1410
-
1411
- # compute the previous noisy sample x_t -> x_t-1
1412
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1413
-
1414
- if callback_on_step_end is not None:
1415
- callback_kwargs = {}
1416
- for k in callback_on_step_end_tensor_inputs:
1417
- callback_kwargs[k] = locals()[k]
1418
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1419
-
1420
- latents = callback_outputs.pop("latents", latents)
1421
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1422
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1423
-
1424
- # call the callback, if provided
1425
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1426
- progress_bar.update()
1427
- if callback is not None and i % callback_steps == 0:
1428
- step_idx = i // getattr(self.scheduler, "order", 1)
1429
- callback(step_idx, t, latents)
1430
-
1431
- if not output_type == "latent":
1432
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1433
- 0
1434
- ]
1435
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1436
- else:
1437
- image = latents
1438
- has_nsfw_concept = None
1439
-
1440
- if has_nsfw_concept is None:
1441
- do_denormalize = [True] * image.shape[0]
1442
- else:
1443
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1444
-
1445
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1446
-
1447
- # Offload all models
1448
- self.maybe_free_model_hooks()
1449
-
1450
- # restore the original attention processors in the UNet if PAG was used
1451
- if self.do_perturbed_attention_guidance:
1452
- drop_layers = self.pag_applied_layers_index
1453
- for drop_layer in drop_layers:
1454
- try:
1455
- if drop_layer[0] == "d":
1456
- down_layers[int(drop_layer[1])].processor = AttnProcessor2_0()
1457
- elif drop_layer[0] == "m":
1458
- mid_layers[int(drop_layer[1])].processor = AttnProcessor2_0()
1459
- elif drop_layer[0] == "u":
1460
- up_layers[int(drop_layer[1])].processor = AttnProcessor2_0()
1461
- else:
1462
- raise ValueError(f"Invalid layer type: {drop_layer[0]}")
1463
- except IndexError:
1464
- raise ValueError(
1465
- f"Invalid layer index: {drop_layer}. Available layers: {len(down_layers)} down layers, {len(mid_layers)} mid layers, {len(up_layers)} up layers."
1466
- )
1467
-
1468
- if not return_dict:
1469
- return (image, has_nsfw_concept)
1470
-
1471
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

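The guidance arithmetic in the denoising loop above folds classifier-free guidance (CFG) and perturbed-attention guidance (PAG) into a single update around the text-conditioned prediction. As a minimal standalone sketch of that combination (the tensor names, the 1000-step schedule, and the helper name are modelled on the loop above, not an official API):

```python
def combine_cfg_and_pag(noise_uncond, noise_text, noise_text_perturb,
                        guidance_scale, pag_scale,
                        pag_adaptive_scaling=0.0, t=0):
    # Adaptive PAG weight, clipped at zero, mirroring the loop above
    # (which assumes a 1000-step diffusion schedule).
    signal_scale = max(pag_scale - pag_adaptive_scaling * (1000 - t), 0.0)
    return (
        noise_text
        + (guidance_scale - 1.0) * (noise_text - noise_uncond)   # CFG term
        + signal_scale * (noise_text - noise_text_perturb)       # PAG term
    )
```

With `pag_scale=0` this collapses to plain classifier-free guidance rewritten around the text prediction (`noise_uncond + guidance_scale * (noise_text - noise_uncond)`), and with `pag_adaptive_scaling=0` the PAG weight stays fixed across timesteps, matching the simpler CFG-only and PAG-only branches of the loop.
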
pipeline_stable_diffusion_upscale_ldm3d.py DELETED
@@ -1,772 +0,0 @@
1
- # Copyright 2024 The Intel Labs Team Authors and the HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Any, Callable, Dict, List, Optional, Union
17
-
18
- import numpy as np
19
- import PIL
20
- import torch
21
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
22
-
23
- from diffusers import DiffusionPipeline
24
- from diffusers.image_processor import PipelineDepthInput, PipelineImageInput, VaeImageProcessorLDM3D
25
- from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
26
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
27
- from diffusers.models.lora import adjust_lora_scale_text_encoder
28
- from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
29
- from diffusers.pipelines.stable_diffusion_ldm3d.pipeline_stable_diffusion_ldm3d import LDM3DPipelineOutput
30
- from diffusers.schedulers import DDPMScheduler, KarrasDiffusionSchedulers
31
- from diffusers.utils import (
32
- USE_PEFT_BACKEND,
33
- deprecate,
34
- logging,
35
- scale_lora_layers,
36
- unscale_lora_layers,
37
- )
38
- from diffusers.utils.torch_utils import randn_tensor
39
-
40
-
41
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
42
-
43
- EXAMPLE_DOC_STRING = """
44
- Examples:
45
- ```python
46
- >>> from diffusers import StableDiffusionUpscaleLDM3DPipeline
47
- >>> from PIL import Image
48
- >>> from io import BytesIO
49
- >>> import requests
50
-
51
- >>> pipe = StableDiffusionUpscaleLDM3DPipeline.from_pretrained("Intel/ldm3d-sr")
52
- >>> pipe = pipe.to("cuda")
53
- >>> rgb_path = "https://huggingface.co/Intel/ldm3d-sr/resolve/main/lemons_ldm3d_rgb.jpg"
54
- >>> depth_path = "https://huggingface.co/Intel/ldm3d-sr/resolve/main/lemons_ldm3d_depth.png"
55
- >>> low_res_rgb = Image.open(BytesIO(requests.get(rgb_path).content)).convert("RGB")
56
- >>> low_res_depth = Image.open(BytesIO(requests.get(depth_path).content)).convert("L")
57
- >>> output = pipe(
58
- ... prompt="high quality high resolution uhd 4k image",
59
- ... rgb=low_res_rgb,
60
- ... depth=low_res_depth,
61
- ... num_inference_steps=50,
62
- ... target_res=[1024, 1024],
63
- ... )
64
- >>> rgb_image, depth_image = output.rgb, output.depth
65
- >>> rgb_image[0].save("hr_ldm3d_rgb.jpg")
66
- >>> depth_image[0].save("hr_ldm3d_depth.png")
67
- ```
68
- """
69
-
70
-
71
- class StableDiffusionUpscaleLDM3DPipeline(
72
- DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
73
- ):
74
- r"""
75
- Pipeline for upscaling the RGB and depth outputs of LDM3D (joint image and depth super-resolution).
76
-
77
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
78
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
79
-
80
- The pipeline also inherits the following loading methods:
81
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
82
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
83
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
84
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
85
-
86
- Args:
87
- vae ([`AutoencoderKL`]):
88
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
89
- text_encoder ([`~transformers.CLIPTextModel`]):
90
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
91
- tokenizer ([`~transformers.CLIPTokenizer`]):
92
- A `CLIPTokenizer` to tokenize text.
93
- unet ([`UNet2DConditionModel`]):
94
- A `UNet2DConditionModel` to denoise the encoded image latents.
95
- low_res_scheduler ([`SchedulerMixin`]):
96
- A scheduler used to add initial noise to the low resolution conditioning image. It must be an instance of
97
- [`DDPMScheduler`].
98
- scheduler ([`SchedulerMixin`]):
99
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
100
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
101
- safety_checker ([`StableDiffusionSafetyChecker`]):
102
- Classification module that estimates whether generated images could be considered offensive or harmful.
103
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
104
- about a model's potential harms.
105
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
106
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
107
- """
108
-
109
- _optional_components = ["safety_checker", "feature_extractor"]
110
-
111
- def __init__(
112
- self,
113
- vae: AutoencoderKL,
114
- text_encoder: CLIPTextModel,
115
- tokenizer: CLIPTokenizer,
116
- unet: UNet2DConditionModel,
117
- low_res_scheduler: DDPMScheduler,
118
- scheduler: KarrasDiffusionSchedulers,
119
- safety_checker: StableDiffusionSafetyChecker,
120
- feature_extractor: CLIPImageProcessor,
121
- requires_safety_checker: bool = True,
122
- watermarker: Optional[Any] = None,
123
- max_noise_level: int = 350,
124
- ):
125
- super().__init__()
126
-
127
- if safety_checker is None and requires_safety_checker:
128
- logger.warning(
129
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
130
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
131
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
132
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
133
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
134
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
135
- )
136
-
137
- if safety_checker is not None and feature_extractor is None:
138
- raise ValueError(
139
- f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
140
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
141
- )
142
-
143
- self.register_modules(
144
- vae=vae,
145
- text_encoder=text_encoder,
146
- tokenizer=tokenizer,
147
- unet=unet,
148
- low_res_scheduler=low_res_scheduler,
149
- scheduler=scheduler,
150
- safety_checker=safety_checker,
151
- watermarker=watermarker,
152
- feature_extractor=feature_extractor,
153
- )
154
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
155
- self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor, resample="bilinear")
156
- # self.register_to_config(requires_safety_checker=requires_safety_checker)
157
- self.register_to_config(max_noise_level=max_noise_level)
158
-
159
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline._encode_prompt
160
- def _encode_prompt(
161
- self,
162
- prompt,
163
- device,
164
- num_images_per_prompt,
165
- do_classifier_free_guidance,
166
- negative_prompt=None,
167
- prompt_embeds: Optional[torch.Tensor] = None,
168
- negative_prompt_embeds: Optional[torch.Tensor] = None,
169
- lora_scale: Optional[float] = None,
170
- **kwargs,
171
- ):
172
- deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
173
- deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
174
-
175
- prompt_embeds_tuple = self.encode_prompt(
176
- prompt=prompt,
177
- device=device,
178
- num_images_per_prompt=num_images_per_prompt,
179
- do_classifier_free_guidance=do_classifier_free_guidance,
180
- negative_prompt=negative_prompt,
181
- prompt_embeds=prompt_embeds,
182
- negative_prompt_embeds=negative_prompt_embeds,
183
- lora_scale=lora_scale,
184
- **kwargs,
185
- )
186
-
187
- # concatenate for backwards comp
188
- prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
189
-
190
- return prompt_embeds
191
-
192
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline.encode_prompt
193
- def encode_prompt(
194
- self,
195
- prompt,
196
- device,
197
- num_images_per_prompt,
198
- do_classifier_free_guidance,
199
- negative_prompt=None,
200
- prompt_embeds: Optional[torch.Tensor] = None,
201
- negative_prompt_embeds: Optional[torch.Tensor] = None,
202
- lora_scale: Optional[float] = None,
203
- clip_skip: Optional[int] = None,
204
- ):
205
- r"""
206
- Encodes the prompt into text encoder hidden states.
207
-
208
- Args:
209
- prompt (`str` or `List[str]`, *optional*):
210
- prompt to be encoded
211
- device: (`torch.device`):
212
- torch device
213
- num_images_per_prompt (`int`):
214
- number of images that should be generated per prompt
215
- do_classifier_free_guidance (`bool`):
216
- whether to use classifier free guidance or not
217
- negative_prompt (`str` or `List[str]`, *optional*):
218
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
219
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
220
- less than `1`).
221
- prompt_embeds (`torch.Tensor`, *optional*):
222
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
223
- provided, text embeddings will be generated from `prompt` input argument.
224
- negative_prompt_embeds (`torch.Tensor`, *optional*):
225
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
226
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
227
- argument.
228
- lora_scale (`float`, *optional*):
229
- A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
230
- clip_skip (`int`, *optional*):
231
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
232
- the output of the pre-final layer will be used for computing the prompt embeddings.
233
- """
234
- # set lora scale so that monkey patched LoRA
235
- # function of text encoder can correctly access it
236
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
237
- self._lora_scale = lora_scale
238
-
239
- # dynamically adjust the LoRA scale
240
- if not USE_PEFT_BACKEND:
241
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
242
- else:
243
- scale_lora_layers(self.text_encoder, lora_scale)
244
-
245
- if prompt is not None and isinstance(prompt, str):
246
- batch_size = 1
247
- elif prompt is not None and isinstance(prompt, list):
248
- batch_size = len(prompt)
249
- else:
250
- batch_size = prompt_embeds.shape[0]
251
-
252
- if prompt_embeds is None:
253
- # textual inversion: process multi-vector tokens if necessary
254
- if isinstance(self, TextualInversionLoaderMixin):
255
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
256
-
257
- text_inputs = self.tokenizer(
258
- prompt,
259
- padding="max_length",
260
- max_length=self.tokenizer.model_max_length,
261
- truncation=True,
262
- return_tensors="pt",
263
- )
264
- text_input_ids = text_inputs.input_ids
265
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
266
-
267
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
268
- text_input_ids, untruncated_ids
269
- ):
270
- removed_text = self.tokenizer.batch_decode(
271
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
272
- )
273
- logger.warning(
274
- "The following part of your input was truncated because CLIP can only handle sequences up to"
275
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
276
- )
277
-
278
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
279
- attention_mask = text_inputs.attention_mask.to(device)
280
- else:
281
- attention_mask = None
282
-
283
- if clip_skip is None:
284
- prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
285
- prompt_embeds = prompt_embeds[0]
286
- else:
287
- prompt_embeds = self.text_encoder(
288
- text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
289
- )
290
- # Access the `hidden_states` first, that contains a tuple of
291
- # all the hidden states from the encoder layers. Then index into
292
- # the tuple to access the hidden states from the desired layer.
293
- prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
294
- # We also need to apply the final LayerNorm here to not mess with the
295
- # representations. The `last_hidden_states` that we typically use for
296
- # obtaining the final prompt representations passes through the LayerNorm
297
- # layer.
298
- prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
299
-
300
- if self.text_encoder is not None:
301
- prompt_embeds_dtype = self.text_encoder.dtype
302
- elif self.unet is not None:
303
- prompt_embeds_dtype = self.unet.dtype
304
- else:
305
- prompt_embeds_dtype = prompt_embeds.dtype
306
-
307
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
308
-
309
- bs_embed, seq_len, _ = prompt_embeds.shape
310
- # duplicate text embeddings for each generation per prompt, using mps friendly method
311
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
312
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
313
-
314
- # get unconditional embeddings for classifier free guidance
315
- if do_classifier_free_guidance and negative_prompt_embeds is None:
316
- uncond_tokens: List[str]
317
- if negative_prompt is None:
318
- uncond_tokens = [""] * batch_size
319
- elif prompt is not None and type(prompt) is not type(negative_prompt):
320
- raise TypeError(
321
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
322
- f" {type(prompt)}."
323
- )
324
- elif isinstance(negative_prompt, str):
325
- uncond_tokens = [negative_prompt]
326
- elif batch_size != len(negative_prompt):
327
- raise ValueError(
328
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
329
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
330
- " the batch size of `prompt`."
331
- )
332
- else:
333
- uncond_tokens = negative_prompt
334
-
335
- # textual inversion: process multi-vector tokens if necessary
336
- if isinstance(self, TextualInversionLoaderMixin):
337
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
338
-
339
- max_length = prompt_embeds.shape[1]
340
- uncond_input = self.tokenizer(
341
- uncond_tokens,
342
- padding="max_length",
343
- max_length=max_length,
344
- truncation=True,
345
- return_tensors="pt",
346
- )
347
-
348
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
349
- attention_mask = uncond_input.attention_mask.to(device)
350
- else:
351
- attention_mask = None
352
-
353
- negative_prompt_embeds = self.text_encoder(
354
- uncond_input.input_ids.to(device),
355
- attention_mask=attention_mask,
356
- )
357
- negative_prompt_embeds = negative_prompt_embeds[0]
358
-
359
- if do_classifier_free_guidance:
360
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
361
- seq_len = negative_prompt_embeds.shape[1]
362
-
363
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
364
-
365
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
366
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
367
-
368
- if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
369
- # Retrieve the original scale by scaling back the LoRA layers
370
- unscale_lora_layers(self.text_encoder, lora_scale)
371
-
372
- return prompt_embeds, negative_prompt_embeds
373
-
374
- def run_safety_checker(self, image, device, dtype):
375
- if self.safety_checker is None:
376
- has_nsfw_concept = None
377
- else:
378
- if torch.is_tensor(image):
379
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
380
- else:
381
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
382
- rgb_feature_extractor_input = feature_extractor_input[0]
383
- safety_checker_input = self.feature_extractor(rgb_feature_extractor_input, return_tensors="pt").to(device)
384
- image, has_nsfw_concept = self.safety_checker(
385
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
386
- )
387
- return image, has_nsfw_concept
388
-
389
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
390
- def prepare_extra_step_kwargs(self, generator, eta):
391
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
392
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
393
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
394
- # and should be between [0, 1]
395
-
396
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
397
- extra_step_kwargs = {}
398
- if accepts_eta:
399
- extra_step_kwargs["eta"] = eta
400
-
401
- # check if the scheduler accepts generator
402
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
403
- if accepts_generator:
404
- extra_step_kwargs["generator"] = generator
405
- return extra_step_kwargs
406
-
407
- def check_inputs(
408
- self,
409
- prompt,
410
- image,
411
- noise_level,
412
- callback_steps,
413
- negative_prompt=None,
414
- prompt_embeds=None,
415
- negative_prompt_embeds=None,
416
- target_res=None,
417
- ):
418
- if (callback_steps is None) or (
419
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
420
- ):
421
- raise ValueError(
422
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
423
- f" {type(callback_steps)}."
424
- )
425
-
426
- if prompt is not None and prompt_embeds is not None:
427
- raise ValueError(
428
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
429
- " only forward one of the two."
430
- )
431
- elif prompt is None and prompt_embeds is None:
432
- raise ValueError(
433
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
434
- )
435
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
436
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
437
-
438
- if negative_prompt is not None and negative_prompt_embeds is not None:
439
- raise ValueError(
440
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
441
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
442
- )
443
-
444
- if prompt_embeds is not None and negative_prompt_embeds is not None:
445
- if prompt_embeds.shape != negative_prompt_embeds.shape:
446
- raise ValueError(
447
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
448
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
449
- f" {negative_prompt_embeds.shape}."
450
- )
451
-
452
- if (
453
- not isinstance(image, torch.Tensor)
454
- and not isinstance(image, PIL.Image.Image)
455
- and not isinstance(image, np.ndarray)
456
- and not isinstance(image, list)
457
- ):
458
- raise ValueError(
459
- f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}"
460
- )
461
-
462
- # verify batch size of prompt and image are same if image is a list or tensor or numpy array
463
- if isinstance(image, (list, np.ndarray, torch.Tensor)):
464
- if prompt is not None and isinstance(prompt, str):
465
- batch_size = 1
466
- elif prompt is not None and isinstance(prompt, list):
467
- batch_size = len(prompt)
468
- else:
469
- batch_size = prompt_embeds.shape[0]
470
-
471
- if isinstance(image, list):
472
- image_batch_size = len(image)
473
- else:
474
- image_batch_size = image.shape[0]
475
- if batch_size != image_batch_size:
476
- raise ValueError(
477
- f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}."
478
- " Please make sure that passed `prompt` matches the batch size of `image`."
479
- )
480
-
481
- # check noise level
482
- if noise_level > self.config.max_noise_level:
483
- raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}")
484
-
485
- if (callback_steps is None) or (
486
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
487
- ):
488
- raise ValueError(
489
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
490
- f" {type(callback_steps)}."
491
- )
492
-
493
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
494
- shape = (batch_size, num_channels_latents, height, width)
495
- if latents is None:
496
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
497
- else:
498
- if latents.shape != shape:
499
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
500
- latents = latents.to(device)
501
-
502
- # scale the initial noise by the standard deviation required by the scheduler
503
- latents = latents * self.scheduler.init_noise_sigma
504
- return latents
505
-
506
- # def upcast_vae(self):
507
- # dtype = self.vae.dtype
508
- # self.vae.to(dtype=torch.float32)
509
- # use_torch_2_0_or_xformers = isinstance(
510
- # self.vae.decoder.mid_block.attentions[0].processor,
511
- # (
512
- # AttnProcessor2_0,
513
- # XFormersAttnProcessor,
514
- # LoRAXFormersAttnProcessor,
515
- # LoRAAttnProcessor2_0,
516
- # ),
517
- # )
518
- # # if xformers or torch_2_0 is used attention block does not need
519
- # # to be in float32 which can save lots of memory
520
- # if use_torch_2_0_or_xformers:
521
- # self.vae.post_quant_conv.to(dtype)
522
- # self.vae.decoder.conv_in.to(dtype)
523
- # self.vae.decoder.mid_block.to(dtype)
524
-
525
- @torch.no_grad()
526
- def __call__(
527
- self,
528
- prompt: Union[str, List[str]] = None,
529
- rgb: PipelineImageInput = None,
530
- depth: PipelineDepthInput = None,
531
- num_inference_steps: int = 75,
532
- guidance_scale: float = 9.0,
533
- noise_level: int = 20,
534
- negative_prompt: Optional[Union[str, List[str]]] = None,
535
- num_images_per_prompt: Optional[int] = 1,
536
- eta: float = 0.0,
537
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
538
- latents: Optional[torch.Tensor] = None,
539
- prompt_embeds: Optional[torch.Tensor] = None,
540
- negative_prompt_embeds: Optional[torch.Tensor] = None,
541
- output_type: Optional[str] = "pil",
542
- return_dict: bool = True,
543
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
544
- callback_steps: int = 1,
545
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
546
- target_res: Optional[List[int]] = [1024, 1024],
547
- ):
548
- r"""
549
- The call function to the pipeline for generation.
550
-
551
- Args:
552
- prompt (`str` or `List[str]`, *optional*):
553
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
554
- rgb (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
555
- `Image` or tensor representing the low-resolution RGB image batch to be upscaled; the matching depth input is passed via `depth`.
556
- num_inference_steps (`int`, *optional*, defaults to 75):
557
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
558
- expense of slower inference.
559
- guidance_scale (`float`, *optional*, defaults to 9.0):
560
- A higher guidance scale value encourages the model to generate images closely linked to the text
561
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
562
- negative_prompt (`str` or `List[str]`, *optional*):
563
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
564
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
565
- num_images_per_prompt (`int`, *optional*, defaults to 1):
566
- The number of images to generate per prompt.
567
- eta (`float`, *optional*, defaults to 0.0):
568
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
569
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
570
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
571
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
572
- generation deterministic.
573
- latents (`torch.Tensor`, *optional*):
574
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
575
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
576
- tensor is generated by sampling using the supplied random `generator`.
577
- prompt_embeds (`torch.Tensor`, *optional*):
578
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
579
- provided, text embeddings are generated from the `prompt` input argument.
580
- negative_prompt_embeds (`torch.Tensor`, *optional*):
581
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
582
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
583
- output_type (`str`, *optional*, defaults to `"pil"`):
584
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
585
- return_dict (`bool`, *optional*, defaults to `True`):
586
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
587
- plain tuple.
588
- callback (`Callable`, *optional*):
589
- A function that calls every `callback_steps` steps during inference. The function is called with the
590
- following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
591
- callback_steps (`int`, *optional*, defaults to 1):
592
- The frequency at which the `callback` function is called. If not specified, the callback is called at
593
- every step.
594
- cross_attention_kwargs (`dict`, *optional*):
595
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
596
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
597
-
598
- Examples:
599
-
600
- Returns:
601
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
602
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
603
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
604
- second element is a list of `bool`s indicating whether the corresponding generated image contains
605
- "not-safe-for-work" (nsfw) content.
606
- """
607
- # 1. Check inputs. Raise error if not correct
608
- self.check_inputs(
609
- prompt,
610
- rgb,
611
- noise_level,
612
- callback_steps,
613
- negative_prompt,
614
- prompt_embeds,
615
- negative_prompt_embeds,
616
- )
617
- # 2. Define call parameters
618
- if prompt is not None and isinstance(prompt, str):
619
- batch_size = 1
620
- elif prompt is not None and isinstance(prompt, list):
621
- batch_size = len(prompt)
622
- else:
623
- batch_size = prompt_embeds.shape[0]
624
-
625
- device = self._execution_device
626
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
627
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
628
- # corresponds to doing no classifier free guidance.
629
- do_classifier_free_guidance = guidance_scale > 1.0
630
-
631
- # 3. Encode input prompt
632
- prompt_embeds, negative_prompt_embeds = self.encode_prompt(
633
- prompt,
634
- device,
635
- num_images_per_prompt,
636
- do_classifier_free_guidance,
637
- negative_prompt,
638
- prompt_embeds=prompt_embeds,
639
- negative_prompt_embeds=negative_prompt_embeds,
640
- )
641
- # For classifier free guidance, we need to do two forward passes.
642
- # Here we concatenate the unconditional and text embeddings into a single batch
643
- # to avoid doing two forward passes
644
- if do_classifier_free_guidance:
645
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
646
-
647
- # 4. Preprocess image
648
- rgb, depth = self.image_processor.preprocess(rgb, depth, target_res=target_res)
649
- rgb = rgb.to(dtype=prompt_embeds.dtype, device=device)
650
- depth = depth.to(dtype=prompt_embeds.dtype, device=device)
651
-
652
- # 5. set timesteps
653
- self.scheduler.set_timesteps(num_inference_steps, device=device)
654
- timesteps = self.scheduler.timesteps
655
-
656
- # 6. Encode low resolution image to latent space
657
- image = torch.cat([rgb, depth], axis=1)
658
- latent_space_image = self.vae.encode(image).latent_dist.sample(generator)
659
- latent_space_image *= self.vae.scaling_factor
660
- noise_level = torch.tensor([noise_level], dtype=torch.long, device=device)
661
- # noise_rgb = randn_tensor(rgb.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
662
- # rgb = self.low_res_scheduler.add_noise(rgb, noise_rgb, noise_level)
663
- # noise_depth = randn_tensor(depth.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
664
- # depth = self.low_res_scheduler.add_noise(depth, noise_depth, noise_level)
665
-
666
- batch_multiplier = 2 if do_classifier_free_guidance else 1
667
- latent_space_image = torch.cat([latent_space_image] * batch_multiplier * num_images_per_prompt)
668
- noise_level = torch.cat([noise_level] * latent_space_image.shape[0])
669
-
670
- # 7. Prepare latent variables
671
- height, width = latent_space_image.shape[2:]
672
- num_channels_latents = self.vae.config.latent_channels
673
-
674
- latents = self.prepare_latents(
675
- batch_size * num_images_per_prompt,
676
- num_channels_latents,
677
- height,
678
- width,
679
- prompt_embeds.dtype,
680
- device,
681
- generator,
682
- latents,
683
- )
684
-
685
- # 8. Check that sizes of image and latents match
686
- num_channels_image = latent_space_image.shape[1]
687
- if num_channels_latents + num_channels_image != self.unet.config.in_channels:
688
- raise ValueError(
689
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
690
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
691
- f" `num_channels_image`: {num_channels_image} "
692
- f" = {num_channels_latents+num_channels_image}. Please verify the config of"
693
- " `pipeline.unet` or your `image` input."
694
- )
695
-
696
- # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
697
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
698
-
699
- # 10. Denoising loop
700
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
701
- with self.progress_bar(total=num_inference_steps) as progress_bar:
702
- for i, t in enumerate(timesteps):
703
- # expand the latents if we are doing classifier free guidance
704
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
705
-
706
- # concat latents, mask, masked_image_latents in the channel dimension
707
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
708
- latent_model_input = torch.cat([latent_model_input, latent_space_image], dim=1)
709
-
710
- # predict the noise residual
711
- noise_pred = self.unet(
712
- latent_model_input,
713
- t,
714
- encoder_hidden_states=prompt_embeds,
715
- cross_attention_kwargs=cross_attention_kwargs,
716
- class_labels=noise_level,
717
- return_dict=False,
718
- )[0]
719
-
720
- # perform guidance
721
- if do_classifier_free_guidance:
722
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
723
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
724
-
725
- # compute the previous noisy sample x_t -> x_t-1
726
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
727
-
728
- # call the callback, if provided
729
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
730
- progress_bar.update()
731
- if callback is not None and i % callback_steps == 0:
732
- callback(i, t, latents)
733
-
734
- if not output_type == "latent":
735
- # make sure the VAE is in float32 mode, as it overflows in float16
736
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
737
-
738
- if needs_upcasting:
739
- self.upcast_vae()
740
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
741
-
742
- image = self.vae.decode(latents / self.vae.scaling_factor, return_dict=False)[0]
743
-
744
- # cast back to fp16 if needed
745
- if needs_upcasting:
746
- self.vae.to(dtype=torch.float16)
747
-
748
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
749
-
750
- else:
751
- image = latents
752
- has_nsfw_concept = None
753
-
754
- if has_nsfw_concept is None:
755
- do_denormalize = [True] * image.shape[0]
756
- else:
757
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
758
-
759
- rgb, depth = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
760
-
761
- # 11. Apply watermark
762
- if output_type == "pil" and self.watermarker is not None:
763
- rgb = self.watermarker.apply_watermark(rgb)
764
-
765
- # Offload last model to CPU
766
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
767
- self.final_offload_hook.offload()
768
-
769
- if not return_dict:
770
- return ((rgb, depth), has_nsfw_concept)
771
-
772
- return LDM3DPipelineOutput(rgb=rgb, depth=depth, nsfw_content_detected=has_nsfw_concept)

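The upscaler above conditions the UNet by VAE-encoding the concatenated low-resolution RGB and depth inputs and stacking that conditioning with the noisy latents along the channel dimension, which is why step 8 checks that latent channels plus conditioning channels equal `unet.config.in_channels`. A minimal sketch of that channel bookkeeping, with purely illustrative channel counts rather than values read from a real checkpoint:

```python
import torch

# Illustrative shapes only: 4 latent channels for the noisy sample and 4 for
# the VAE-encoded low-resolution RGB+depth conditioning (assumed values).
batch, latent_c, cond_c, h, w = 1, 4, 4, 64, 64
latents = torch.randn(batch, latent_c, h, w)
low_res_latents = torch.randn(batch, cond_c, h, w)

# Same bookkeeping as the pipeline: the UNet must have been built with
# in_channels == latent_c + cond_c for the concatenated input to be valid.
model_input = torch.cat([latents, low_res_latents], dim=1)
assert model_input.shape[1] == latent_c + cond_c
```

Performing this check up front lets the pipeline raise a clear error before the denoising loop instead of failing inside the first UNet forward pass.
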
pipeline_stable_diffusion_xl_controlnet_adapter.py DELETED
@@ -1,1411 +0,0 @@
1
- # Copyright 2024 TencentARC and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- import inspect
17
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
-
19
- import numpy as np
20
- import PIL.Image
21
- import torch
22
- import torch.nn.functional as F
23
- from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
24
-
25
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
26
- from diffusers.loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
27
- from diffusers.models import AutoencoderKL, ControlNetModel, MultiAdapter, T2IAdapter, UNet2DConditionModel
28
- from diffusers.models.attention_processor import (
29
- AttnProcessor2_0,
30
- LoRAAttnProcessor2_0,
31
- LoRAXFormersAttnProcessor,
32
- XFormersAttnProcessor,
33
- )
34
- from diffusers.models.lora import adjust_lora_scale_text_encoder
35
- from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
36
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
37
- from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
38
- from diffusers.schedulers import KarrasDiffusionSchedulers
39
- from diffusers.utils import (
40
- PIL_INTERPOLATION,
41
- USE_PEFT_BACKEND,
42
- logging,
43
- replace_example_docstring,
44
- scale_lora_layers,
45
- unscale_lora_layers,
46
- )
47
- from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
48
-
49
-
50
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
51
-
52
- EXAMPLE_DOC_STRING = """
53
- Examples:
54
- ```py
55
- >>> import torch
56
- >>> from diffusers import T2IAdapter, ControlNetModel, StableDiffusionXLAdapterPipeline, DDPMScheduler
57
- >>> from diffusers.utils import load_image
58
- >>> from controlnet_aux.midas import MidasDetector
59
-
60
- >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
61
- >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
62
-
63
- >>> image = load_image(img_url).resize((1024, 1024))
64
- >>> mask_image = load_image(mask_url).resize((1024, 1024))
65
-
66
- >>> midas_depth = MidasDetector.from_pretrained(
67
- ... "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large"
68
- ... ).to("cuda")
69
-
70
- >>> depth_image = midas_depth(
71
- ... image, detect_resolution=512, image_resolution=1024
72
- ... )
73
-
74
- >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0"
75
-
76
- >>> adapter = T2IAdapter.from_pretrained(
77
- ... "Adapter/t2iadapter",
78
- ... subfolder="sketch_sdxl_1.0",
79
- ... torch_dtype=torch.float16,
80
- ... adapter_type="full_adapter_xl",
81
- ... )
82
-
83
- >>> controlnet = ControlNetModel.from_pretrained(
84
- ... "diffusers/controlnet-depth-sdxl-1.0",
85
- ... torch_dtype=torch.float16,
86
- ... variant="fp16",
87
- ... use_safetensors=True
88
- ... ).to("cuda")
89
-
90
- >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler")
91
-
92
- >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
93
- ... model_id,
94
- ... adapter=adapter,
95
- ... controlnet=controlnet,
96
- ... torch_dtype=torch.float16,
97
- ... variant="fp16",
98
- ... scheduler=scheduler
99
- ... ).to("cuda")
100
-
101
- >>> strength = 0.5
102
-
103
- >>> generator = torch.manual_seed(42)
104
- >>> sketch_image_out = pipe(
105
- ... prompt="a photo of a tiger sitting on a park bench",
106
- ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality",
107
- ... adapter_image=depth_image,
108
- ... control_image=mask_image,
109
- ... adapter_conditioning_scale=strength,
110
- ... controlnet_conditioning_scale=strength,
111
- ... generator=generator,
112
- ... guidance_scale=7.5,
113
- ... ).images[0]
114
- ```
115
- """
116
-
117
-
118
- def _preprocess_adapter_image(image, height, width):
119
- if isinstance(image, torch.Tensor):
120
- return image
121
- elif isinstance(image, PIL.Image.Image):
122
- image = [image]
123
-
124
- if isinstance(image[0], PIL.Image.Image):
125
- image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
126
- image = [
127
- i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
128
- ] # expand [h, w] or [h, w, c] to [b, h, w, c]
129
- image = np.concatenate(image, axis=0)
130
- image = np.array(image).astype(np.float32) / 255.0
131
- image = image.transpose(0, 3, 1, 2)
132
- image = torch.from_numpy(image)
133
- elif isinstance(image[0], torch.Tensor):
134
- if image[0].ndim == 3:
135
- image = torch.stack(image, dim=0)
136
- elif image[0].ndim == 4:
137
- image = torch.cat(image, dim=0)
138
- else:
139
- raise ValueError(
140
- f"Invalid image tensor! Expecting an image tensor with 3 or 4 dimensions, but received: {image[0].ndim}"
141
- )
142
- return image
143
-
144
-
145
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
146
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
147
- """
148
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
149
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
150
- """
151
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
152
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
153
- # rescale the results from guidance (fixes overexposure)
154
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
155
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
156
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
157
- return noise_cfg
158
-
159
-
160
- class StableDiffusionXLControlNetAdapterPipeline(
161
- DiffusionPipeline,
162
- StableDiffusionMixin,
163
- FromSingleFileMixin,
164
- StableDiffusionXLLoraLoaderMixin,
165
- TextualInversionLoaderMixin,
166
- ):
167
- r"""
168
- Pipeline for text-to-image generation using Stable Diffusion XL augmented with ControlNet and T2I-Adapter
169
- https://arxiv.org/abs/2302.08453
170
-
171
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
172
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
173
-
174
- Args:
175
- adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`):
176
- Provides additional conditioning to the unet during the denoising process. If you set multiple Adapters as a
177
- list, the outputs from each Adapter are added together to create one combined additional conditioning.
178
- adapter_weights (`List[float]`, *optional*, defaults to None):
179
- List of floats representing the weight by which each adapter's output will be multiplied before adding them
180
- together.
181
- vae ([`AutoencoderKL`]):
182
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
183
- text_encoder ([`CLIPTextModel`]):
184
- Frozen text-encoder. Stable Diffusion uses the text portion of
185
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
186
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
187
- tokenizer (`CLIPTokenizer`):
188
- Tokenizer of class
189
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
190
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
191
- scheduler ([`SchedulerMixin`]):
192
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
193
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
194
- safety_checker ([`StableDiffusionSafetyChecker`]):
195
- Classification module that estimates whether generated images could be considered offensive or harmful.
196
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
197
- feature_extractor ([`CLIPFeatureExtractor`]):
198
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
199
- """
200
-
201
- model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
202
- _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"]
203
-
204
- def __init__(
205
- self,
206
- vae: AutoencoderKL,
207
- text_encoder: CLIPTextModel,
208
- text_encoder_2: CLIPTextModelWithProjection,
209
- tokenizer: CLIPTokenizer,
210
- tokenizer_2: CLIPTokenizer,
211
- unet: UNet2DConditionModel,
212
- adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]],
213
- controlnet: Union[ControlNetModel, MultiControlNetModel],
214
- scheduler: KarrasDiffusionSchedulers,
215
- force_zeros_for_empty_prompt: bool = True,
216
- ):
217
- super().__init__()
218
-
219
- if isinstance(controlnet, (list, tuple)):
220
- controlnet = MultiControlNetModel(controlnet)
221
-
222
- self.register_modules(
223
- vae=vae,
224
- text_encoder=text_encoder,
225
- text_encoder_2=text_encoder_2,
226
- tokenizer=tokenizer,
227
- tokenizer_2=tokenizer_2,
228
- unet=unet,
229
- adapter=adapter,
230
- controlnet=controlnet,
231
- scheduler=scheduler,
232
- )
233
- self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
234
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
235
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
236
- self.control_image_processor = VaeImageProcessor(
237
- vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
238
- )
239
- self.default_sample_size = self.unet.config.sample_size
240
-
241
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
242
- def encode_prompt(
243
- self,
244
- prompt: str,
245
- prompt_2: Optional[str] = None,
246
- device: Optional[torch.device] = None,
247
- num_images_per_prompt: int = 1,
248
- do_classifier_free_guidance: bool = True,
249
- negative_prompt: Optional[str] = None,
250
- negative_prompt_2: Optional[str] = None,
251
- prompt_embeds: Optional[torch.Tensor] = None,
252
- negative_prompt_embeds: Optional[torch.Tensor] = None,
253
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
254
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
255
- lora_scale: Optional[float] = None,
256
- clip_skip: Optional[int] = None,
257
- ):
258
- r"""
259
- Encodes the prompt into text encoder hidden states.
260
-
261
- Args:
262
- prompt (`str` or `List[str]`, *optional*):
263
- prompt to be encoded
264
- prompt_2 (`str` or `List[str]`, *optional*):
265
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
266
- used in both text-encoders
267
- device: (`torch.device`):
268
- torch device
269
- num_images_per_prompt (`int`):
270
- number of images that should be generated per prompt
271
- do_classifier_free_guidance (`bool`):
272
- whether to use classifier free guidance or not
273
- negative_prompt (`str` or `List[str]`, *optional*):
274
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
275
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
276
- less than `1`).
277
- negative_prompt_2 (`str` or `List[str]`, *optional*):
278
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
279
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
280
- prompt_embeds (`torch.Tensor`, *optional*):
281
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
282
- provided, text embeddings will be generated from `prompt` input argument.
283
- negative_prompt_embeds (`torch.Tensor`, *optional*):
284
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
285
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
286
- argument.
287
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
288
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
289
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
290
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
291
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
292
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
293
- input argument.
294
- lora_scale (`float`, *optional*):
295
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
296
- clip_skip (`int`, *optional*):
297
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
298
- the output of the pre-final layer will be used for computing the prompt embeddings.
299
- """
300
- device = device or self._execution_device
301
-
302
- # set lora scale so that monkey patched LoRA
303
- # function of text encoder can correctly access it
304
- if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
305
- self._lora_scale = lora_scale
306
-
307
- # dynamically adjust the LoRA scale
308
- if self.text_encoder is not None:
309
- if not USE_PEFT_BACKEND:
310
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
311
- else:
312
- scale_lora_layers(self.text_encoder, lora_scale)
313
-
314
- if self.text_encoder_2 is not None:
315
- if not USE_PEFT_BACKEND:
316
- adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
317
- else:
318
- scale_lora_layers(self.text_encoder_2, lora_scale)
319
-
320
- prompt = [prompt] if isinstance(prompt, str) else prompt
321
-
322
- if prompt is not None:
323
- batch_size = len(prompt)
324
- else:
325
- batch_size = prompt_embeds.shape[0]
326
-
327
- # Define tokenizers and text encoders
328
- tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
329
- text_encoders = (
330
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
331
- )
332
-
333
- if prompt_embeds is None:
334
- prompt_2 = prompt_2 or prompt
335
- prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
336
-
337
- # textual inversion: process multi-vector tokens if necessary
338
- prompt_embeds_list = []
339
- prompts = [prompt, prompt_2]
340
- for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
341
- if isinstance(self, TextualInversionLoaderMixin):
342
- prompt = self.maybe_convert_prompt(prompt, tokenizer)
343
-
344
- text_inputs = tokenizer(
345
- prompt,
346
- padding="max_length",
347
- max_length=tokenizer.model_max_length,
348
- truncation=True,
349
- return_tensors="pt",
350
- )
351
-
352
- text_input_ids = text_inputs.input_ids
353
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
354
-
355
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
356
- text_input_ids, untruncated_ids
357
- ):
358
- removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
359
- logger.warning(
360
- "The following part of your input was truncated because CLIP can only handle sequences up to"
361
- f" {tokenizer.model_max_length} tokens: {removed_text}"
362
- )
363
-
364
- prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
365
-
366
- # We are always interested only in the pooled output of the final text encoder
367
- pooled_prompt_embeds = prompt_embeds[0]
368
- if clip_skip is None:
369
- prompt_embeds = prompt_embeds.hidden_states[-2]
370
- else:
371
- # "2" because SDXL always indexes from the penultimate layer.
372
- prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
373
-
374
- prompt_embeds_list.append(prompt_embeds)
375
-
376
- prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
377
-
378
- # get unconditional embeddings for classifier free guidance
379
- zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
380
- if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
381
- negative_prompt_embeds = torch.zeros_like(prompt_embeds)
382
- negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
383
- elif do_classifier_free_guidance and negative_prompt_embeds is None:
384
- negative_prompt = negative_prompt or ""
385
- negative_prompt_2 = negative_prompt_2 or negative_prompt
386
-
387
- # normalize str to list
388
- negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
389
- negative_prompt_2 = (
390
- batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
391
- )
392
-
393
- uncond_tokens: List[str]
394
- if prompt is not None and type(prompt) is not type(negative_prompt):
395
- raise TypeError(
396
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
397
- f" {type(prompt)}."
398
- )
399
- elif batch_size != len(negative_prompt):
400
- raise ValueError(
401
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
402
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
403
- " the batch size of `prompt`."
404
- )
405
- else:
406
- uncond_tokens = [negative_prompt, negative_prompt_2]
407
-
408
- negative_prompt_embeds_list = []
409
- for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
410
- if isinstance(self, TextualInversionLoaderMixin):
411
- negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
412
-
413
- max_length = prompt_embeds.shape[1]
414
- uncond_input = tokenizer(
415
- negative_prompt,
416
- padding="max_length",
417
- max_length=max_length,
418
- truncation=True,
419
- return_tensors="pt",
420
- )
421
-
422
- negative_prompt_embeds = text_encoder(
423
- uncond_input.input_ids.to(device),
424
- output_hidden_states=True,
425
- )
426
- # We are always interested only in the pooled output of the final text encoder
427
- negative_pooled_prompt_embeds = negative_prompt_embeds[0]
428
- negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
429
-
430
- negative_prompt_embeds_list.append(negative_prompt_embeds)
431
-
432
- negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
433
-
434
- if self.text_encoder_2 is not None:
435
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
436
- else:
437
- prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
438
-
439
- bs_embed, seq_len, _ = prompt_embeds.shape
440
- # duplicate text embeddings for each generation per prompt, using mps friendly method
441
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
442
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
443
-
444
- if do_classifier_free_guidance:
445
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
446
- seq_len = negative_prompt_embeds.shape[1]
447
-
448
- if self.text_encoder_2 is not None:
449
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
450
- else:
451
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
452
-
453
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
454
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
455
-
456
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
457
- bs_embed * num_images_per_prompt, -1
458
- )
459
- if do_classifier_free_guidance:
460
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
461
- bs_embed * num_images_per_prompt, -1
462
- )
463
-
464
- if self.text_encoder is not None:
465
- if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
466
- # Retrieve the original scale by scaling back the LoRA layers
467
- unscale_lora_layers(self.text_encoder, lora_scale)
468
-
469
- if self.text_encoder_2 is not None:
470
- if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
471
- # Retrieve the original scale by scaling back the LoRA layers
472
- unscale_lora_layers(self.text_encoder_2, lora_scale)
473
-
474
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
475
-
476
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
477
- def prepare_extra_step_kwargs(self, generator, eta):
478
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
479
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
480
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
481
- # and should be between [0, 1]
482
-
483
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
484
- extra_step_kwargs = {}
485
- if accepts_eta:
486
- extra_step_kwargs["eta"] = eta
487
-
488
- # check if the scheduler accepts generator
489
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
490
- if accepts_generator:
491
- extra_step_kwargs["generator"] = generator
492
- return extra_step_kwargs
493
-
494
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
495
- def check_image(self, image, prompt, prompt_embeds):
496
- image_is_pil = isinstance(image, PIL.Image.Image)
497
- image_is_tensor = isinstance(image, torch.Tensor)
498
- image_is_np = isinstance(image, np.ndarray)
499
- image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
500
- image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
501
- image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
502
-
503
- if (
504
- not image_is_pil
505
- and not image_is_tensor
506
- and not image_is_np
507
- and not image_is_pil_list
508
- and not image_is_tensor_list
509
- and not image_is_np_list
510
- ):
511
- raise TypeError(
512
- f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
513
- )
514
-
515
- if image_is_pil:
516
- image_batch_size = 1
517
- else:
518
- image_batch_size = len(image)
519
-
520
- if prompt is not None and isinstance(prompt, str):
521
- prompt_batch_size = 1
522
- elif prompt is not None and isinstance(prompt, list):
523
- prompt_batch_size = len(prompt)
524
- elif prompt_embeds is not None:
525
- prompt_batch_size = prompt_embeds.shape[0]
526
-
527
- if image_batch_size != 1 and image_batch_size != prompt_batch_size:
528
- raise ValueError(
529
- f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
530
- )
531
-
532
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs
533
- def check_inputs(
534
- self,
535
- prompt,
536
- prompt_2,
537
- height,
538
- width,
539
- callback_steps,
540
- negative_prompt=None,
541
- negative_prompt_2=None,
542
- prompt_embeds=None,
543
- negative_prompt_embeds=None,
544
- pooled_prompt_embeds=None,
545
- negative_pooled_prompt_embeds=None,
546
- callback_on_step_end_tensor_inputs=None,
547
- ):
548
- if height % 8 != 0 or width % 8 != 0:
549
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
550
-
551
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
552
- raise ValueError(
553
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
554
- f" {type(callback_steps)}."
555
- )
556
-
557
- if callback_on_step_end_tensor_inputs is not None and not all(
558
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
559
- ):
560
- raise ValueError(
561
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
562
- )
563
-
564
- if prompt is not None and prompt_embeds is not None:
565
- raise ValueError(
566
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
567
- " only forward one of the two."
568
- )
569
- elif prompt_2 is not None and prompt_embeds is not None:
570
- raise ValueError(
571
- f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
572
- " only forward one of the two."
573
- )
574
- elif prompt is None and prompt_embeds is None:
575
- raise ValueError(
576
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
577
- )
578
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
579
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
580
- elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
581
- raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
582
-
583
- if negative_prompt is not None and negative_prompt_embeds is not None:
584
- raise ValueError(
585
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
586
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
587
- )
588
- elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
589
- raise ValueError(
590
- f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
591
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
592
- )
593
-
594
- if prompt_embeds is not None and negative_prompt_embeds is not None:
595
- if prompt_embeds.shape != negative_prompt_embeds.shape:
596
- raise ValueError(
597
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
598
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
599
- f" {negative_prompt_embeds.shape}."
600
- )
601
-
602
- if prompt_embeds is not None and pooled_prompt_embeds is None:
603
- raise ValueError(
604
- "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
605
- )
606
-
607
- if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
608
- raise ValueError(
609
- "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
610
- )
611
-
612
- def check_conditions(
613
- self,
614
- prompt,
615
- prompt_embeds,
616
- adapter_image,
617
- control_image,
618
- adapter_conditioning_scale,
619
- controlnet_conditioning_scale,
620
- control_guidance_start,
621
- control_guidance_end,
622
- ):
623
- # controlnet checks
624
- if not isinstance(control_guidance_start, (tuple, list)):
625
- control_guidance_start = [control_guidance_start]
626
-
627
- if not isinstance(control_guidance_end, (tuple, list)):
628
- control_guidance_end = [control_guidance_end]
629
-
630
- if len(control_guidance_start) != len(control_guidance_end):
631
- raise ValueError(
632
- f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
633
- )
634
-
635
- if isinstance(self.controlnet, MultiControlNetModel):
636
- if len(control_guidance_start) != len(self.controlnet.nets):
637
- raise ValueError(
638
- f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
639
- )
640
-
641
- for start, end in zip(control_guidance_start, control_guidance_end):
642
- if start >= end:
643
- raise ValueError(
644
- f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
645
- )
646
- if start < 0.0:
647
- raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
648
- if end > 1.0:
649
- raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
650
-
651
- # Check controlnet `image`
652
- is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
653
- self.controlnet, torch._dynamo.eval_frame.OptimizedModule
654
- )
655
- if (
656
- isinstance(self.controlnet, ControlNetModel)
657
- or is_compiled
658
- and isinstance(self.controlnet._orig_mod, ControlNetModel)
659
- ):
660
- self.check_image(control_image, prompt, prompt_embeds)
661
- elif (
662
- isinstance(self.controlnet, MultiControlNetModel)
663
- or is_compiled
664
- and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
665
- ):
666
- if not isinstance(control_image, list):
667
- raise TypeError("For multiple controlnets: `control_image` must be type `list`")
668
-
669
- # When `image` is a nested list:
670
- # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
671
- elif any(isinstance(i, list) for i in control_image):
672
- raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
673
- elif len(control_image) != len(self.controlnet.nets):
674
- raise ValueError(
675
- f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(control_image)} images and {len(self.controlnet.nets)} ControlNets."
676
- )
677
-
678
- for image_ in control_image:
679
- self.check_image(image_, prompt, prompt_embeds)
680
- else:
681
- assert False
682
-
683
- # Check `controlnet_conditioning_scale`
684
- if (
685
- isinstance(self.controlnet, ControlNetModel)
686
- or is_compiled
687
- and isinstance(self.controlnet._orig_mod, ControlNetModel)
688
- ):
689
- if not isinstance(controlnet_conditioning_scale, float):
690
- raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
691
- elif (
692
- isinstance(self.controlnet, MultiControlNetModel)
693
- or is_compiled
694
- and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
695
- ):
696
- if isinstance(controlnet_conditioning_scale, list):
697
- if any(isinstance(i, list) for i in controlnet_conditioning_scale):
698
- raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
699
- elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
700
- self.controlnet.nets
701
- ):
702
- raise ValueError(
703
- "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
704
- " the same length as the number of controlnets"
705
- )
706
- else:
707
- assert False
708
-
709
- # adapter checks
710
- if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
711
- self.check_image(adapter_image, prompt, prompt_embeds)
712
- elif (
713
- isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
714
- ):
715
- if not isinstance(adapter_image, list):
716
- raise TypeError("For multiple adapters: `adapter_image` must be type `list`")
717
-
718
- # When `image` is a nested list:
719
- # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
720
- elif any(isinstance(i, list) for i in adapter_image):
721
- raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
722
- elif len(adapter_image) != len(self.adapter.adapters):
723
- raise ValueError(
724
- f"For multiple adapters: `image` must have the same length as the number of adapters, but got {len(adapter_image)} images and {len(self.adapter.adapters)} Adapters."
725
- )
726
-
727
- for image_ in adapter_image:
728
- self.check_image(image_, prompt, prompt_embeds)
729
- else:
730
- assert False
731
-
732
- # Check `adapter_conditioning_scale`
733
- if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
734
- if not isinstance(adapter_conditioning_scale, float):
735
- raise TypeError("For single adapter: `adapter_conditioning_scale` must be type `float`.")
736
- elif (
737
- isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
738
- ):
739
- if isinstance(adapter_conditioning_scale, list):
740
- if any(isinstance(i, list) for i in adapter_conditioning_scale):
741
- raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
742
- elif isinstance(adapter_conditioning_scale, list) and len(adapter_conditioning_scale) != len(
743
- self.adapter.adapters
744
- ):
745
- raise ValueError(
746
- "For multiple adapters: When `adapter_conditioning_scale` is specified as `list`, it must have"
747
- " the same length as the number of adapters"
748
- )
749
- else:
750
- assert False
751
-
752
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
753
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
754
- shape = (
755
- batch_size,
756
- num_channels_latents,
757
- int(height) // self.vae_scale_factor,
758
- int(width) // self.vae_scale_factor,
759
- )
760
- if isinstance(generator, list) and len(generator) != batch_size:
761
- raise ValueError(
762
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
763
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
764
- )
765
-
766
- if latents is None:
767
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
768
- else:
769
- latents = latents.to(device)
770
-
771
- # scale the initial noise by the standard deviation required by the scheduler
772
- latents = latents * self.scheduler.init_noise_sigma
773
- return latents
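As a quick sanity check of the latent shape assembled in `prepare_latents`, here is a minimal sketch assuming the usual SDXL configuration (4 latent channels and a VAE with four `block_out_channels`, hence `vae_scale_factor = 2 ** 3 = 8`); these numbers are assumptions for illustration, not values read from a checkpoint.

```python
# Hypothetical SDXL-like configuration, for illustration only.
batch_size = 1
num_channels_latents = 4            # unet.config.in_channels for a typical SDXL UNet
vae_scale_factor = 2 ** (4 - 1)     # len(vae.config.block_out_channels) == 4  ->  8
height, width = 1024, 1024

shape = (
    batch_size,
    num_channels_latents,
    height // vae_scale_factor,
    width // vae_scale_factor,
)
print(shape)  # (1, 4, 128, 128)
```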
774
-
775
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
776
- def _get_add_time_ids(
777
- self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
778
- ):
779
- add_time_ids = list(original_size + crops_coords_top_left + target_size)
780
-
781
- passed_add_embed_dim = (
782
- self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
783
- )
784
- expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
785
-
786
- if expected_add_embed_dim != passed_add_embed_dim:
787
- raise ValueError(
788
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
789
- )
790
-
791
- add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
792
- return add_time_ids
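The tensor built by `_get_add_time_ids` is simply the six SDXL micro-conditioning integers in a fixed order (`original_size + crops_coords_top_left + target_size`); the check above then verifies that `6 * addition_time_embed_dim + text_encoder_projection_dim` matches `unet.add_embedding.linear_1.in_features`. A tiny illustration with hypothetical values:

```python
import torch

# Hypothetical micro-conditioning values, for illustration only.
original_size = (1024, 1024)
crops_coords_top_left = (0, 0)
target_size = (1024, 1024)

add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_time_ids = torch.tensor([add_time_ids], dtype=torch.float32)
print(add_time_ids)  # tensor([[1024., 1024., 0., 0., 1024., 1024.]])
```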
793
-
794
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
795
- def upcast_vae(self):
796
- dtype = self.vae.dtype
797
- self.vae.to(dtype=torch.float32)
798
- use_torch_2_0_or_xformers = isinstance(
799
- self.vae.decoder.mid_block.attentions[0].processor,
800
- (
801
- AttnProcessor2_0,
802
- XFormersAttnProcessor,
803
- LoRAXFormersAttnProcessor,
804
- LoRAAttnProcessor2_0,
805
- ),
806
- )
807
- # if xformers or torch_2_0 is used attention block does not need
808
- # to be in float32 which can save lots of memory
809
- if use_torch_2_0_or_xformers:
810
- self.vae.post_quant_conv.to(dtype)
811
- self.vae.decoder.conv_in.to(dtype)
812
- self.vae.decoder.mid_block.to(dtype)
813
-
814
- # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
815
- def _default_height_width(self, height, width, image):
816
- # NOTE: It is possible that a list of images have different
817
- # dimensions for each image, so just checking the first image
818
- # is not _exactly_ correct, but it is simple.
819
- while isinstance(image, list):
820
- image = image[0]
821
-
822
- if height is None:
823
- if isinstance(image, PIL.Image.Image):
824
- height = image.height
825
- elif isinstance(image, torch.Tensor):
826
- height = image.shape[-2]
827
-
828
- # round down to nearest multiple of `self.adapter.downscale_factor`
829
- height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor
830
-
831
- if width is None:
832
- if isinstance(image, PIL.Image.Image):
833
- width = image.width
834
- elif isinstance(image, torch.Tensor):
835
- width = image.shape[-1]
836
-
837
- # round down to nearest multiple of `self.adapter.downscale_factor`
838
- width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor
839
-
840
- return height, width
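The two roundings in `_default_height_width` just snap the inferred size down to the nearest multiple of the adapter's downscale factor; a tiny illustration with a purely hypothetical factor of 16:

```python
# Hypothetical numbers: a 1000x1016 conditioning image and downscale_factor = 16.
downscale_factor = 16
height, width = 1000, 1016

height = (height // downscale_factor) * downscale_factor  # 992
width = (width // downscale_factor) * downscale_factor    # 1008
print(height, width)  # 992 1008
```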
841
-
842
- def prepare_control_image(
843
- self,
844
- image,
845
- width,
846
- height,
847
- batch_size,
848
- num_images_per_prompt,
849
- device,
850
- dtype,
851
- do_classifier_free_guidance=False,
852
- guess_mode=False,
853
- ):
854
- image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
855
- image_batch_size = image.shape[0]
856
-
857
- if image_batch_size == 1:
858
- repeat_by = batch_size
859
- else:
860
- # image batch size is the same as prompt batch size
861
- repeat_by = num_images_per_prompt
862
-
863
- image = image.repeat_interleave(repeat_by, dim=0)
864
-
865
- image = image.to(device=device, dtype=dtype)
866
-
867
- if do_classifier_free_guidance and not guess_mode:
868
- image = torch.cat([image] * 2)
869
-
870
- return image
871
-
872
- @torch.no_grad()
873
- @replace_example_docstring(EXAMPLE_DOC_STRING)
874
- def __call__(
875
- self,
876
- prompt: Union[str, List[str]] = None,
877
- prompt_2: Optional[Union[str, List[str]]] = None,
878
- adapter_image: PipelineImageInput = None,
879
- control_image: PipelineImageInput = None,
880
- height: Optional[int] = None,
881
- width: Optional[int] = None,
882
- num_inference_steps: int = 50,
883
- denoising_end: Optional[float] = None,
884
- guidance_scale: float = 5.0,
885
- negative_prompt: Optional[Union[str, List[str]]] = None,
886
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
887
- num_images_per_prompt: Optional[int] = 1,
888
- eta: float = 0.0,
889
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
890
- latents: Optional[torch.Tensor] = None,
891
- prompt_embeds: Optional[torch.Tensor] = None,
892
- negative_prompt_embeds: Optional[torch.Tensor] = None,
893
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
894
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
895
- output_type: Optional[str] = "pil",
896
- return_dict: bool = True,
897
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
898
- callback_steps: int = 1,
899
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
900
- guidance_rescale: float = 0.0,
901
- original_size: Optional[Tuple[int, int]] = None,
902
- crops_coords_top_left: Tuple[int, int] = (0, 0),
903
- target_size: Optional[Tuple[int, int]] = None,
904
- negative_original_size: Optional[Tuple[int, int]] = None,
905
- negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
906
- negative_target_size: Optional[Tuple[int, int]] = None,
907
- adapter_conditioning_scale: Union[float, List[float]] = 1.0,
908
- adapter_conditioning_factor: float = 1.0,
909
- clip_skip: Optional[int] = None,
910
- controlnet_conditioning_scale=1.0,
911
- guess_mode: bool = False,
912
- control_guidance_start: float = 0.0,
913
- control_guidance_end: float = 1.0,
914
- ):
915
- r"""
916
- Function invoked when calling the pipeline for generation.
917
-
918
- Args:
919
- prompt (`str` or `List[str]`, *optional*):
920
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
921
- instead.
922
- prompt_2 (`str` or `List[str]`, *optional*):
923
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
924
- used in both text-encoders
925
- adapter_image (`torch.Tensor`, `PIL.Image.Image`, `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`):
926
- The Adapter input condition. The Adapter uses this input condition to generate guidance for the UNet. If the
927
- type is specified as `torch.Tensor`, it is passed to the Adapter as is. `PIL.Image.Image` can also be
928
- accepted as an image. The control image is automatically resized to fit the output image.
929
- control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
930
- `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
931
- The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
932
- specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
933
- accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height
934
- and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
935
- `init`, images must be passed as a list such that each element of the list can be correctly batched for
936
- input to a single ControlNet.
937
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
938
- The height in pixels of the generated image. Anything below 512 pixels won't work well for
939
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
940
- and checkpoints that are not specifically fine-tuned on low resolutions.
941
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
942
- The width in pixels of the generated image. Anything below 512 pixels won't work well for
943
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
944
- and checkpoints that are not specifically fine-tuned on low resolutions.
945
- num_inference_steps (`int`, *optional*, defaults to 50):
946
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
947
- expense of slower inference.
948
- denoising_end (`float`, *optional*):
949
- When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
950
- completed before it is intentionally prematurely terminated. As a result, the returned sample will
951
- still retain a substantial amount of noise as determined by the discrete timesteps selected by the
952
- scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
953
- "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
954
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
955
- guidance_scale (`float`, *optional*, defaults to 5.0):
956
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
957
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
958
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
959
- 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
960
- usually at the expense of lower image quality.
961
- negative_prompt (`str` or `List[str]`, *optional*):
962
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
963
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
964
- less than `1`).
965
- negative_prompt_2 (`str` or `List[str]`, *optional*):
966
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
967
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
968
- num_images_per_prompt (`int`, *optional*, defaults to 1):
969
- The number of images to generate per prompt.
970
- eta (`float`, *optional*, defaults to 0.0):
971
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
972
- [`schedulers.DDIMScheduler`], will be ignored for others.
973
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
974
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
975
- to make generation deterministic.
976
- latents (`torch.Tensor`, *optional*):
977
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
978
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
979
- tensor will be generated by sampling using the supplied random `generator`.
980
- prompt_embeds (`torch.Tensor`, *optional*):
981
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
982
- provided, text embeddings will be generated from `prompt` input argument.
983
- negative_prompt_embeds (`torch.Tensor`, *optional*):
984
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
985
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
986
- argument.
987
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
988
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
989
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
990
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
991
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
992
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
993
- input argument.
994
- output_type (`str`, *optional*, defaults to `"pil"`):
995
- The output format of the generated image. Choose between
996
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
997
- return_dict (`bool`, *optional*, defaults to `True`):
998
- Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`]
999
- instead of a plain tuple.
1000
- callback (`Callable`, *optional*):
1001
- A function that will be called every `callback_steps` steps during inference. The function will be
1002
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
1003
- callback_steps (`int`, *optional*, defaults to 1):
1004
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1005
- called at every step.
1006
- cross_attention_kwargs (`dict`, *optional*):
1007
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1008
- `self.processor` in
1009
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1010
- guidance_rescale (`float`, *optional*, defaults to 0.0):
1011
- Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
1012
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
1013
- [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
1014
- Guidance rescale factor should fix overexposure when using zero terminal SNR.
1015
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1016
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1017
- `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1018
- explained in section 2.2 of
1019
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1020
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1021
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1022
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1023
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1024
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1025
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1026
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
1027
- not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1028
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1029
- negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1031
- To negatively condition the generation process based on a specific image resolution. Part of SDXL's
1032
- micro-conditioning as explained in section 2.2 of
1033
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1034
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1035
- negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1036
- To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
1037
- micro-conditioning as explained in section 2.2 of
1038
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1039
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1040
- negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1041
- To negatively condition the generation process based on a target image resolution. It should be the same
1042
- as `target_size` in most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
1043
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1044
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1045
- controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1046
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the
1047
- residual in the original unet. If multiple ControlNets are specified in init, you can set the
1048
- corresponding scale as a list.
1049
- adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1050
- The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the
1051
- residual in the original unet. If multiple adapters are specified in init, you can set the
1052
- corresponding scale as a list.
1053
- adapter_conditioning_factor (`float`, *optional*, defaults to 1.0):
1054
- The fraction of timesteps for which adapter should be applied. If `adapter_conditioning_factor` is
1055
- `0.0`, adapter is not applied at all. If `adapter_conditioning_factor` is `1.0`, adapter is applied for
1056
- all timesteps. If `adapter_conditioning_factor` is `0.5`, adapter is applied for half of the timesteps.
1057
- clip_skip (`int`, *optional*):
1058
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1059
- the output of the pre-final layer will be used for computing the prompt embeddings.
1060
-
1061
- Examples:
1062
-
1063
- Returns:
1064
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
1065
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
1066
- `tuple`. When returning a tuple, the first element is a list with the generated images.
1067
- """
1068
- controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1069
- adapter = self.adapter._orig_mod if is_compiled_module(self.adapter) else self.adapter
1070
-
1071
- # 0. Default height and width to unet
1072
-
1073
- height, width = self._default_height_width(height, width, adapter_image)
1074
- device = self._execution_device
1075
-
1076
- if isinstance(adapter, MultiAdapter):
1077
- adapter_input = []
1078
-
1079
- for one_image in adapter_image:
1080
- one_image = _preprocess_adapter_image(one_image, height, width)
1081
- one_image = one_image.to(device=device, dtype=adapter.dtype)
1082
- adapter_input.append(one_image)
1083
- else:
1084
- adapter_input = _preprocess_adapter_image(adapter_image, height, width)
1085
- adapter_input = adapter_input.to(device=device, dtype=adapter.dtype)
1086
- original_size = original_size or (height, width)
1087
- target_size = target_size or (height, width)
1088
-
1089
- # 0.1 align format for control guidance
1090
- if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1091
- control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1092
- elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1093
- control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1094
- elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1095
- mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1096
- control_guidance_start, control_guidance_end = (
1097
- mult * [control_guidance_start],
1098
- mult * [control_guidance_end],
1099
- )
1100
-
1101
- if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1102
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1103
- if isinstance(adapter, MultiAdapter) and isinstance(adapter_conditioning_scale, float):
1104
- adapter_conditioning_scale = [adapter_conditioning_scale] * len(adapter.adapters)
1105
-
1106
- # 1. Check inputs. Raise error if not correct
1107
- self.check_inputs(
1108
- prompt,
1109
- prompt_2,
1110
- height,
1111
- width,
1112
- callback_steps,
1113
- negative_prompt=negative_prompt,
1114
- negative_prompt_2=negative_prompt_2,
1115
- prompt_embeds=prompt_embeds,
1116
- negative_prompt_embeds=negative_prompt_embeds,
1117
- pooled_prompt_embeds=pooled_prompt_embeds,
1118
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1119
- )
1120
-
1121
- self.check_conditions(
1122
- prompt,
1123
- prompt_embeds,
1124
- adapter_image,
1125
- control_image,
1126
- adapter_conditioning_scale,
1127
- controlnet_conditioning_scale,
1128
- control_guidance_start,
1129
- control_guidance_end,
1130
- )
1131
-
1132
- # 2. Define call parameters
1133
- if prompt is not None and isinstance(prompt, str):
1134
- batch_size = 1
1135
- elif prompt is not None and isinstance(prompt, list):
1136
- batch_size = len(prompt)
1137
- else:
1138
- batch_size = prompt_embeds.shape[0]
1139
-
1140
- device = self._execution_device
1141
-
1142
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
1143
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1144
- # corresponds to doing no classifier free guidance.
1145
- do_classifier_free_guidance = guidance_scale > 1.0
1146
-
1147
- # 3. Encode input prompt
1148
- (
1149
- prompt_embeds,
1150
- negative_prompt_embeds,
1151
- pooled_prompt_embeds,
1152
- negative_pooled_prompt_embeds,
1153
- ) = self.encode_prompt(
1154
- prompt=prompt,
1155
- prompt_2=prompt_2,
1156
- device=device,
1157
- num_images_per_prompt=num_images_per_prompt,
1158
- do_classifier_free_guidance=do_classifier_free_guidance,
1159
- negative_prompt=negative_prompt,
1160
- negative_prompt_2=negative_prompt_2,
1161
- prompt_embeds=prompt_embeds,
1162
- negative_prompt_embeds=negative_prompt_embeds,
1163
- pooled_prompt_embeds=pooled_prompt_embeds,
1164
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1165
- clip_skip=clip_skip,
1166
- )
1167
-
1168
- # 4. Prepare timesteps
1169
- self.scheduler.set_timesteps(num_inference_steps, device=device)
1170
-
1171
- timesteps = self.scheduler.timesteps
1172
-
1173
- # 5. Prepare latent variables
1174
- num_channels_latents = self.unet.config.in_channels
1175
- latents = self.prepare_latents(
1176
- batch_size * num_images_per_prompt,
1177
- num_channels_latents,
1178
- height,
1179
- width,
1180
- prompt_embeds.dtype,
1181
- device,
1182
- generator,
1183
- latents,
1184
- )
1185
-
1186
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1187
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1188
-
1189
- # 7. Prepare added time ids & embeddings & adapter features
1190
- if isinstance(adapter, MultiAdapter):
1191
- adapter_state = adapter(adapter_input, adapter_conditioning_scale)
1192
- for k, v in enumerate(adapter_state):
1193
- adapter_state[k] = v
1194
- else:
1195
- adapter_state = adapter(adapter_input)
1196
- for k, v in enumerate(adapter_state):
1197
- adapter_state[k] = v * adapter_conditioning_scale
1198
- if num_images_per_prompt > 1:
1199
- for k, v in enumerate(adapter_state):
1200
- adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
1201
- if do_classifier_free_guidance:
1202
- for k, v in enumerate(adapter_state):
1203
- adapter_state[k] = torch.cat([v] * 2, dim=0)
1204
-
1205
- # 7.2 Prepare control images
1206
- if isinstance(controlnet, ControlNetModel):
1207
- control_image = self.prepare_control_image(
1208
- image=control_image,
1209
- width=width,
1210
- height=height,
1211
- batch_size=batch_size * num_images_per_prompt,
1212
- num_images_per_prompt=num_images_per_prompt,
1213
- device=device,
1214
- dtype=controlnet.dtype,
1215
- do_classifier_free_guidance=do_classifier_free_guidance,
1216
- guess_mode=guess_mode,
1217
- )
1218
- elif isinstance(controlnet, MultiControlNetModel):
1219
- control_images = []
1220
-
1221
- for control_image_ in control_image:
1222
- control_image_ = self.prepare_control_image(
1223
- image=control_image_,
1224
- width=width,
1225
- height=height,
1226
- batch_size=batch_size * num_images_per_prompt,
1227
- num_images_per_prompt=num_images_per_prompt,
1228
- device=device,
1229
- dtype=controlnet.dtype,
1230
- do_classifier_free_guidance=do_classifier_free_guidance,
1231
- guess_mode=guess_mode,
1232
- )
1233
-
1234
- control_images.append(control_image_)
1235
-
1236
- control_image = control_images
1237
- else:
1238
- raise ValueError(f"{controlnet.__class__} is not supported.")
1239
-
1240
- # 8.2 Create tensor stating which controlnets to keep
1241
- controlnet_keep = []
1242
- for i in range(len(timesteps)):
1243
- keeps = [
1244
- 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1245
- for s, e in zip(control_guidance_start, control_guidance_end)
1246
- ]
1247
- if isinstance(self.controlnet, MultiControlNetModel):
1248
- controlnet_keep.append(keeps)
1249
- else:
1250
- controlnet_keep.append(keeps[0])
1251
-
1252
- add_text_embeds = pooled_prompt_embeds
1253
- if self.text_encoder_2 is None:
1254
- text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1255
- else:
1256
- text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1257
-
1258
- add_time_ids = self._get_add_time_ids(
1259
- original_size,
1260
- crops_coords_top_left,
1261
- target_size,
1262
- dtype=prompt_embeds.dtype,
1263
- text_encoder_projection_dim=text_encoder_projection_dim,
1264
- )
1265
- if negative_original_size is not None and negative_target_size is not None:
1266
- negative_add_time_ids = self._get_add_time_ids(
1267
- negative_original_size,
1268
- negative_crops_coords_top_left,
1269
- negative_target_size,
1270
- dtype=prompt_embeds.dtype,
1271
- text_encoder_projection_dim=text_encoder_projection_dim,
1272
- )
1273
- else:
1274
- negative_add_time_ids = add_time_ids
1275
-
1276
- if do_classifier_free_guidance:
1277
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1278
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1279
- add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
1280
-
1281
- prompt_embeds = prompt_embeds.to(device)
1282
- add_text_embeds = add_text_embeds.to(device)
1283
- add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1284
-
1285
- # 8. Denoising loop
1286
- num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1287
-
1288
- # 7.1 Apply denoising_end
1289
- if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
1290
- discrete_timestep_cutoff = int(
1291
- round(
1292
- self.scheduler.config.num_train_timesteps
1293
- - (denoising_end * self.scheduler.config.num_train_timesteps)
1294
- )
1295
- )
1296
- num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1297
- timesteps = timesteps[:num_inference_steps]
1298
-
1299
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1300
- for i, t in enumerate(timesteps):
1301
- # expand the latents if we are doing classifier free guidance
1302
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1303
-
1304
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1305
-
1306
- # predict the noise residual
1307
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1308
-
1309
- if i < int(num_inference_steps * adapter_conditioning_factor):
1310
- down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
1311
- else:
1312
- down_intrablock_additional_residuals = None
1313
-
1314
- # ----------- ControlNet
1315
-
1316
- # expand the latents if we are doing classifier free guidance
1317
- latent_model_input_controlnet = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1318
-
1319
- # scale the latent model input for the ControlNet branch
1320
- latent_model_input_controlnet = self.scheduler.scale_model_input(latent_model_input_controlnet, t)
1321
-
1322
- # controlnet(s) inference
1323
- if guess_mode and do_classifier_free_guidance:
1324
- # Infer ControlNet only for the conditional batch.
1325
- control_model_input = latents
1326
- control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1327
- controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1328
- controlnet_added_cond_kwargs = {
1329
- "text_embeds": add_text_embeds.chunk(2)[1],
1330
- "time_ids": add_time_ids.chunk(2)[1],
1331
- }
1332
- else:
1333
- control_model_input = latent_model_input_controlnet
1334
- controlnet_prompt_embeds = prompt_embeds
1335
- controlnet_added_cond_kwargs = added_cond_kwargs
1336
-
1337
- if isinstance(controlnet_keep[i], list):
1338
- cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1339
- else:
1340
- controlnet_cond_scale = controlnet_conditioning_scale
1341
- if isinstance(controlnet_cond_scale, list):
1342
- controlnet_cond_scale = controlnet_cond_scale[0]
1343
- cond_scale = controlnet_cond_scale * controlnet_keep[i]
1344
- down_block_res_samples, mid_block_res_sample = self.controlnet(
1345
- control_model_input,
1346
- t,
1347
- encoder_hidden_states=controlnet_prompt_embeds,
1348
- controlnet_cond=control_image,
1349
- conditioning_scale=cond_scale,
1350
- guess_mode=guess_mode,
1351
- added_cond_kwargs=controlnet_added_cond_kwargs,
1352
- return_dict=False,
1353
- )
1354
-
1355
- noise_pred = self.unet(
1356
- latent_model_input,
1357
- t,
1358
- encoder_hidden_states=prompt_embeds,
1359
- cross_attention_kwargs=cross_attention_kwargs,
1360
- added_cond_kwargs=added_cond_kwargs,
1361
- return_dict=False,
1362
- down_intrablock_additional_residuals=down_intrablock_additional_residuals, # t2iadapter
1363
- down_block_additional_residuals=down_block_res_samples, # controlnet
1364
- mid_block_additional_residual=mid_block_res_sample, # controlnet
1365
- )[0]
1366
-
1367
- # perform guidance
1368
- if do_classifier_free_guidance:
1369
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1370
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1371
-
1372
- if do_classifier_free_guidance and guidance_rescale > 0.0:
1373
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1374
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1375
-
1376
- # compute the previous noisy sample x_t -> x_t-1
1377
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1378
-
1379
- # call the callback, if provided
1380
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1381
- progress_bar.update()
1382
- if callback is not None and i % callback_steps == 0:
1383
- step_idx = i // getattr(self.scheduler, "order", 1)
1384
- callback(step_idx, t, latents)
1385
-
1386
- if not output_type == "latent":
1387
- # make sure the VAE is in float32 mode, as it overflows in float16
1388
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1389
-
1390
- if needs_upcasting:
1391
- self.upcast_vae()
1392
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1393
-
1394
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1395
-
1396
- # cast back to fp16 if needed
1397
- if needs_upcasting:
1398
- self.vae.to(dtype=torch.float16)
1399
- else:
1400
- image = latents
1401
- return StableDiffusionXLPipelineOutput(images=image)
1402
-
1403
- image = self.image_processor.postprocess(image, output_type=output_type)
1404
-
1405
- # Offload all models
1406
- self.maybe_free_model_hooks()
1407
-
1408
- if not return_dict:
1409
- return (image,)
1410
-
1411
- return StableDiffusionXLPipelineOutput(images=image)
 
pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py DELETED
@@ -1,1850 +0,0 @@
1
- # Copyright 2024 Jake Babbidge, TencentARC and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # ignore the entire file for precommit
16
- # type: ignore
17
-
18
- import inspect
19
- from collections.abc import Callable
20
- from typing import Any, Dict, List, Optional, Tuple, Union
21
-
22
- import numpy as np
23
- import PIL
24
- import torch
25
- import torch.nn.functional as F
26
- from transformers import (
27
- CLIPTextModel,
28
- CLIPTextModelWithProjection,
29
- CLIPTokenizer,
30
- )
31
-
32
- from diffusers import DiffusionPipeline
33
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
34
- from diffusers.loaders import (
35
- FromSingleFileMixin,
36
- LoraLoaderMixin,
37
- StableDiffusionXLLoraLoaderMixin,
38
- TextualInversionLoaderMixin,
39
- )
40
- from diffusers.models import (
41
- AutoencoderKL,
42
- ControlNetModel,
43
- MultiAdapter,
44
- T2IAdapter,
45
- UNet2DConditionModel,
46
- )
47
- from diffusers.models.attention_processor import (
48
- AttnProcessor2_0,
49
- LoRAAttnProcessor2_0,
50
- LoRAXFormersAttnProcessor,
51
- XFormersAttnProcessor,
52
- )
53
- from diffusers.models.lora import adjust_lora_scale_text_encoder
54
- from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
55
- from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
56
- from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
57
- from diffusers.schedulers import KarrasDiffusionSchedulers
58
- from diffusers.utils import (
59
- PIL_INTERPOLATION,
60
- USE_PEFT_BACKEND,
61
- logging,
62
- replace_example_docstring,
63
- scale_lora_layers,
64
- unscale_lora_layers,
65
- )
66
- from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
67
-
68
-
69
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
70
-
71
- EXAMPLE_DOC_STRING = """
72
- Examples:
73
- ```py
74
- >>> import torch
75
-         >>> from diffusers import ControlNetModel, DiffusionPipeline, T2IAdapter
76
- >>> from diffusers.utils import load_image
77
- >>> from PIL import Image
78
- >>> from controlnet_aux.midas import MidasDetector
79
-
80
- >>> adapter = T2IAdapter.from_pretrained(
81
- ... "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
82
- ... ).to("cuda")
83
-
84
- >>> controlnet = ControlNetModel.from_pretrained(
85
- ... "diffusers/controlnet-depth-sdxl-1.0",
86
- ... torch_dtype=torch.float16,
87
- ... variant="fp16",
88
- ... use_safetensors=True
89
- ... ).to("cuda")
90
-
91
- >>> pipe = DiffusionPipeline.from_pretrained(
92
- ... "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
93
- ... torch_dtype=torch.float16,
94
- ... variant="fp16",
95
- ... use_safetensors=True,
96
- ... custom_pipeline="stable_diffusion_xl_adapter_controlnet_inpaint",
97
- ... adapter=adapter,
98
- ... controlnet=controlnet,
99
- ... ).to("cuda")
100
-
101
- >>> prompt = "a tiger sitting on a park bench"
102
- >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
103
- >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
104
-
105
- >>> image = load_image(img_url).resize((1024, 1024))
106
- >>> mask_image = load_image(mask_url).resize((1024, 1024))
107
-
108
- >>> midas_depth = MidasDetector.from_pretrained(
109
- ... "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large"
110
- ... ).to("cuda")
111
-
112
- >>> depth_image = midas_depth(
113
- ... image, detect_resolution=512, image_resolution=1024
114
- ... )
115
-
116
- >>> strength = 0.4
117
-
118
- >>> generator = torch.manual_seed(42)
119
-
120
- >>> result_image = pipe(
121
- ... image=image,
122
-         ...     mask_image=mask_image,
123
- ... adapter_image=depth_image,
124
- ... control_image=depth_image,
125
- ... controlnet_conditioning_scale=strength,
126
- ... adapter_conditioning_scale=strength,
127
- ... strength=0.7,
128
- ... generator=generator,
129
- ... prompt=prompt,
130
- ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality",
131
- ... num_inference_steps=50
132
- ... ).images[0]
133
- ```
134
- """
135
-
136
-
137
- def _preprocess_adapter_image(image, height, width):
138
- if isinstance(image, torch.Tensor):
139
- return image
140
- elif isinstance(image, PIL.Image.Image):
141
- image = [image]
142
-
143
- if isinstance(image[0], PIL.Image.Image):
144
- image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
145
- image = [
146
- i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
147
- ] # expand [h, w] or [h, w, c] to [b, h, w, c]
148
- image = np.concatenate(image, axis=0)
149
- image = np.array(image).astype(np.float32) / 255.0
150
- image = image.transpose(0, 3, 1, 2)
151
- image = torch.from_numpy(image)
152
- elif isinstance(image[0], torch.Tensor):
153
- if image[0].ndim == 3:
154
- image = torch.stack(image, dim=0)
155
- elif image[0].ndim == 4:
156
- image = torch.cat(image, dim=0)
157
- else:
158
- raise ValueError(
159
-                 f"Invalid image tensor! Expecting image tensor with 3 or 4 dimensions, but received: {image[0].ndim}"
160
- )
161
- return image
162
-
163
-
164
- def mask_pil_to_torch(mask, height, width):
165
- # preprocess mask
166
-     if isinstance(mask, (PIL.Image.Image, np.ndarray)):
167
- mask = [mask]
168
-
169
- if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
170
- mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
171
- mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
172
- mask = mask.astype(np.float32) / 255.0
173
- elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
174
- mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
175
-
176
- mask = torch.from_numpy(mask)
177
- return mask
178
-
179
-
180
- def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
181
- """
182
- Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
183
- converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
184
- ``image`` and ``1`` for the ``mask``.
185
-
186
- The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
187
- binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
188
-
189
- Args:
190
- image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
191
- It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
192
- ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
193
- mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
194
- It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
195
- ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
196
-
197
-
198
- Raises:
199
- ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
200
- should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
201
- TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
202
-         (or the other way around).
203
-
204
- Returns:
205
- tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
206
- dimensions: ``batch x channels x height x width``.
207
- """
208
-
209
- # checkpoint. #TODO(Yiyi) - need to clean this up later
210
- if image is None:
211
- raise ValueError("`image` input cannot be undefined.")
212
-
213
- if mask is None:
214
- raise ValueError("`mask_image` input cannot be undefined.")
215
-
216
- if isinstance(image, torch.Tensor):
217
- if not isinstance(mask, torch.Tensor):
218
- mask = mask_pil_to_torch(mask, height, width)
219
-
220
- if image.ndim == 3:
221
- image = image.unsqueeze(0)
222
-
223
- # Batch and add channel dim for single mask
224
- if mask.ndim == 2:
225
- mask = mask.unsqueeze(0).unsqueeze(0)
226
-
227
- # Batch single mask or add channel dim
228
- if mask.ndim == 3:
229
- # Single batched mask, no channel dim or single mask not batched but channel dim
230
- if mask.shape[0] == 1:
231
- mask = mask.unsqueeze(0)
232
-
233
- # Batched masks no channel dim
234
- else:
235
- mask = mask.unsqueeze(1)
236
-
237
- assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
238
- # assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
239
- assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
240
-
241
- # Check image is in [-1, 1]
242
- # if image.min() < -1 or image.max() > 1:
243
- # raise ValueError("Image should be in [-1, 1] range")
244
-
245
- # Check mask is in [0, 1]
246
- if mask.min() < 0 or mask.max() > 1:
247
- raise ValueError("Mask should be in [0, 1] range")
248
-
249
- # Binarize mask
250
- mask[mask < 0.5] = 0
251
- mask[mask >= 0.5] = 1
252
-
253
- # Image as float32
254
- image = image.to(dtype=torch.float32)
255
- elif isinstance(mask, torch.Tensor):
256
-         raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)}) is not")
257
- else:
258
- # preprocess image
259
-         if isinstance(image, (PIL.Image.Image, np.ndarray)):
260
- image = [image]
261
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
262
-             # resize all images w.r.t. the passed height and width
263
- image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
264
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
265
- image = np.concatenate(image, axis=0)
266
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
267
- image = np.concatenate([i[None, :] for i in image], axis=0)
268
-
269
- image = image.transpose(0, 3, 1, 2)
270
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
271
-
272
- mask = mask_pil_to_torch(mask, height, width)
273
- mask[mask < 0.5] = 0
274
- mask[mask >= 0.5] = 1
275
-
276
- if image.shape[1] == 4:
277
- # images are in latent space and thus can't
278
- # be masked set masked_image to None
279
- # we assume that the checkpoint is not an inpainting
280
- # checkpoint. #TODO(Yiyi) - need to clean this up later
281
- masked_image = None
282
- else:
283
- masked_image = image * (mask < 0.5)
284
-
285
- # n.b. ensure backwards compatibility as old function does not return image
286
- if return_image:
287
- return mask, masked_image, image
288
-
289
- return mask, masked_image
290
-
291
-
292
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
293
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
294
- """
295
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
296
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
297
- """
298
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
299
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
300
- # rescale the results from guidance (fixes overexposure)
301
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
302
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
303
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
304
- return noise_cfg
305
-
306
-
307
- class StableDiffusionXLControlNetAdapterInpaintPipeline(
308
- DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, LoraLoaderMixin
309
- ):
310
- r"""
311
-     Pipeline for text-guided image inpainting using Stable Diffusion XL augmented with T2I-Adapter
312
-     (https://arxiv.org/abs/2302.08453) and ControlNet conditioning.
313
-
314
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
315
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
316
-
317
- Args:
318
- adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`):
319
-             Provides additional conditioning to the unet during the denoising process. If you set multiple Adapters as a
320
- list, the outputs from each Adapter are added together to create one combined additional conditioning.
321
- adapter_weights (`List[float]`, *optional*, defaults to None):
322
-             List of floats representing the weight by which each adapter's output will be multiplied before adding them
323
- together.
324
- vae ([`AutoencoderKL`]):
325
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
326
- text_encoder ([`CLIPTextModel`]):
327
- Frozen text-encoder. Stable Diffusion uses the text portion of
328
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
329
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
330
- tokenizer (`CLIPTokenizer`):
331
- Tokenizer of class
332
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
333
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
334
- scheduler ([`SchedulerMixin`]):
335
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
336
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
337
- safety_checker ([`StableDiffusionSafetyChecker`]):
338
- Classification module that estimates whether generated images could be considered offensive or harmful.
339
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
340
- feature_extractor ([`CLIPFeatureExtractor`]):
341
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
342
- requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
343
-             Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the config
344
- of `stabilityai/stable-diffusion-xl-refiner-1-0`.
345
- force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
346
- Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
347
- `stabilityai/stable-diffusion-xl-base-1-0`.
348
- """
349
-
350
- def __init__(
351
- self,
352
- vae: AutoencoderKL,
353
- text_encoder: CLIPTextModel,
354
- text_encoder_2: CLIPTextModelWithProjection,
355
- tokenizer: CLIPTokenizer,
356
- tokenizer_2: CLIPTokenizer,
357
- unet: UNet2DConditionModel,
358
- adapter: Union[T2IAdapter, MultiAdapter],
359
- controlnet: Union[ControlNetModel, MultiControlNetModel],
360
- scheduler: KarrasDiffusionSchedulers,
361
- requires_aesthetics_score: bool = False,
362
- force_zeros_for_empty_prompt: bool = True,
363
- ):
364
- super().__init__()
365
-
366
- if isinstance(controlnet, (list, tuple)):
367
- controlnet = MultiControlNetModel(controlnet)
368
-
369
- self.register_modules(
370
- vae=vae,
371
- text_encoder=text_encoder,
372
- text_encoder_2=text_encoder_2,
373
- tokenizer=tokenizer,
374
- tokenizer_2=tokenizer_2,
375
- unet=unet,
376
- adapter=adapter,
377
- controlnet=controlnet,
378
- scheduler=scheduler,
379
- )
380
- self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
381
- self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
382
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
383
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
384
- self.control_image_processor = VaeImageProcessor(
385
- vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
386
- )
387
- self.default_sample_size = self.unet.config.sample_size
388
-
389
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
390
- def encode_prompt(
391
- self,
392
- prompt: str,
393
- prompt_2: Optional[str] = None,
394
- device: Optional[torch.device] = None,
395
- num_images_per_prompt: int = 1,
396
- do_classifier_free_guidance: bool = True,
397
- negative_prompt: Optional[str] = None,
398
- negative_prompt_2: Optional[str] = None,
399
- prompt_embeds: Optional[torch.Tensor] = None,
400
- negative_prompt_embeds: Optional[torch.Tensor] = None,
401
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
402
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
403
- lora_scale: Optional[float] = None,
404
- clip_skip: Optional[int] = None,
405
- ):
406
- r"""
407
- Encodes the prompt into text encoder hidden states.
408
-
409
- Args:
410
- prompt (`str` or `List[str]`, *optional*):
411
- prompt to be encoded
412
- prompt_2 (`str` or `List[str]`, *optional*):
413
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
414
- used in both text-encoders
415
- device: (`torch.device`):
416
- torch device
417
- num_images_per_prompt (`int`):
418
- number of images that should be generated per prompt
419
- do_classifier_free_guidance (`bool`):
420
- whether to use classifier free guidance or not
421
- negative_prompt (`str` or `List[str]`, *optional*):
422
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
423
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
424
- less than `1`).
425
- negative_prompt_2 (`str` or `List[str]`, *optional*):
426
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
427
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
428
- prompt_embeds (`torch.Tensor`, *optional*):
429
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
430
- provided, text embeddings will be generated from `prompt` input argument.
431
- negative_prompt_embeds (`torch.Tensor`, *optional*):
432
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
433
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
434
- argument.
435
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
436
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
437
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
438
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
439
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
440
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
441
- input argument.
442
- lora_scale (`float`, *optional*):
443
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
444
- clip_skip (`int`, *optional*):
445
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
446
- the output of the pre-final layer will be used for computing the prompt embeddings.
447
- """
448
- device = device or self._execution_device
449
-
450
- # set lora scale so that monkey patched LoRA
451
- # function of text encoder can correctly access it
452
- if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
453
- self._lora_scale = lora_scale
454
-
455
- # dynamically adjust the LoRA scale
456
- if self.text_encoder is not None:
457
- if not USE_PEFT_BACKEND:
458
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
459
- else:
460
- scale_lora_layers(self.text_encoder, lora_scale)
461
-
462
- if self.text_encoder_2 is not None:
463
- if not USE_PEFT_BACKEND:
464
- adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
465
- else:
466
- scale_lora_layers(self.text_encoder_2, lora_scale)
467
-
468
- prompt = [prompt] if isinstance(prompt, str) else prompt
469
-
470
- if prompt is not None:
471
- batch_size = len(prompt)
472
- else:
473
- batch_size = prompt_embeds.shape[0]
474
-
475
- # Define tokenizers and text encoders
476
- tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
477
- text_encoders = (
478
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
479
- )
480
-
481
- if prompt_embeds is None:
482
- prompt_2 = prompt_2 or prompt
483
- prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
484
-
485
- # textual inversion: process multi-vector tokens if necessary
486
- prompt_embeds_list = []
487
- prompts = [prompt, prompt_2]
488
- for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
489
- if isinstance(self, TextualInversionLoaderMixin):
490
- prompt = self.maybe_convert_prompt(prompt, tokenizer)
491
-
492
- text_inputs = tokenizer(
493
- prompt,
494
- padding="max_length",
495
- max_length=tokenizer.model_max_length,
496
- truncation=True,
497
- return_tensors="pt",
498
- )
499
-
500
- text_input_ids = text_inputs.input_ids
501
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
502
-
503
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
504
- text_input_ids, untruncated_ids
505
- ):
506
- removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
507
- logger.warning(
508
- "The following part of your input was truncated because CLIP can only handle sequences up to"
509
- f" {tokenizer.model_max_length} tokens: {removed_text}"
510
- )
511
-
512
- prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
513
-
514
- # We are only ALWAYS interested in the pooled output of the final text encoder
515
- pooled_prompt_embeds = prompt_embeds[0]
516
- if clip_skip is None:
517
- prompt_embeds = prompt_embeds.hidden_states[-2]
518
- else:
519
- # "2" because SDXL always indexes from the penultimate layer.
520
- prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
521
-
522
- prompt_embeds_list.append(prompt_embeds)
523
-
524
- prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
525
-
526
- # get unconditional embeddings for classifier free guidance
527
- zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
528
- if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
529
- negative_prompt_embeds = torch.zeros_like(prompt_embeds)
530
- negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
531
- elif do_classifier_free_guidance and negative_prompt_embeds is None:
532
- negative_prompt = negative_prompt or ""
533
- negative_prompt_2 = negative_prompt_2 or negative_prompt
534
-
535
- # normalize str to list
536
- negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
537
- negative_prompt_2 = (
538
- batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
539
- )
540
-
541
- uncond_tokens: List[str]
542
- if prompt is not None and type(prompt) is not type(negative_prompt):
543
- raise TypeError(
544
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
545
- f" {type(prompt)}."
546
- )
547
- elif batch_size != len(negative_prompt):
548
- raise ValueError(
549
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
550
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
551
- " the batch size of `prompt`."
552
- )
553
- else:
554
- uncond_tokens = [negative_prompt, negative_prompt_2]
555
-
556
- negative_prompt_embeds_list = []
557
- for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
558
- if isinstance(self, TextualInversionLoaderMixin):
559
- negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
560
-
561
- max_length = prompt_embeds.shape[1]
562
- uncond_input = tokenizer(
563
- negative_prompt,
564
- padding="max_length",
565
- max_length=max_length,
566
- truncation=True,
567
- return_tensors="pt",
568
- )
569
-
570
- negative_prompt_embeds = text_encoder(
571
- uncond_input.input_ids.to(device),
572
- output_hidden_states=True,
573
- )
574
- # We are only ALWAYS interested in the pooled output of the final text encoder
575
- negative_pooled_prompt_embeds = negative_prompt_embeds[0]
576
- negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
577
-
578
- negative_prompt_embeds_list.append(negative_prompt_embeds)
579
-
580
- negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
581
-
582
- if self.text_encoder_2 is not None:
583
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
584
- else:
585
- prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
586
-
587
- bs_embed, seq_len, _ = prompt_embeds.shape
588
- # duplicate text embeddings for each generation per prompt, using mps friendly method
589
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
590
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
591
-
592
- if do_classifier_free_guidance:
593
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
594
- seq_len = negative_prompt_embeds.shape[1]
595
-
596
- if self.text_encoder_2 is not None:
597
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
598
- else:
599
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
600
-
601
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
602
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
603
-
604
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
605
- bs_embed * num_images_per_prompt, -1
606
- )
607
- if do_classifier_free_guidance:
608
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
609
- bs_embed * num_images_per_prompt, -1
610
- )
611
-
612
- if self.text_encoder is not None:
613
- if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
614
- # Retrieve the original scale by scaling back the LoRA layers
615
- unscale_lora_layers(self.text_encoder, lora_scale)
616
-
617
- if self.text_encoder_2 is not None:
618
- if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
619
- # Retrieve the original scale by scaling back the LoRA layers
620
- unscale_lora_layers(self.text_encoder_2, lora_scale)
621
-
622
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
623
-
624
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
625
- def prepare_extra_step_kwargs(self, generator, eta):
626
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
627
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
628
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
629
- # and should be between [0, 1]
630
-
631
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
632
- extra_step_kwargs = {}
633
- if accepts_eta:
634
- extra_step_kwargs["eta"] = eta
635
-
636
- # check if the scheduler accepts generator
637
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
638
- if accepts_generator:
639
- extra_step_kwargs["generator"] = generator
640
- return extra_step_kwargs
641
-
642
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
643
- def check_image(self, image, prompt, prompt_embeds):
644
- image_is_pil = isinstance(image, PIL.Image.Image)
645
- image_is_tensor = isinstance(image, torch.Tensor)
646
- image_is_np = isinstance(image, np.ndarray)
647
- image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
648
- image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
649
- image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
650
-
651
- if (
652
- not image_is_pil
653
- and not image_is_tensor
654
- and not image_is_np
655
- and not image_is_pil_list
656
- and not image_is_tensor_list
657
- and not image_is_np_list
658
- ):
659
- raise TypeError(
660
- f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
661
- )
662
-
663
- if image_is_pil:
664
- image_batch_size = 1
665
- else:
666
- image_batch_size = len(image)
667
-
668
- if prompt is not None and isinstance(prompt, str):
669
- prompt_batch_size = 1
670
- elif prompt is not None and isinstance(prompt, list):
671
- prompt_batch_size = len(prompt)
672
- elif prompt_embeds is not None:
673
- prompt_batch_size = prompt_embeds.shape[0]
674
-
675
- if image_batch_size != 1 and image_batch_size != prompt_batch_size:
676
- raise ValueError(
677
- f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
678
- )
679
-
680
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs
681
- def check_inputs(
682
- self,
683
- prompt,
684
- prompt_2,
685
- height,
686
- width,
687
- callback_steps,
688
- negative_prompt=None,
689
- negative_prompt_2=None,
690
- prompt_embeds=None,
691
- negative_prompt_embeds=None,
692
- pooled_prompt_embeds=None,
693
- negative_pooled_prompt_embeds=None,
694
- callback_on_step_end_tensor_inputs=None,
695
- ):
696
- if height % 8 != 0 or width % 8 != 0:
697
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
698
-
699
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
700
- raise ValueError(
701
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
702
- f" {type(callback_steps)}."
703
- )
704
-
705
- if callback_on_step_end_tensor_inputs is not None and not all(
706
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
707
- ):
708
- raise ValueError(
709
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
710
- )
711
-
712
- if prompt is not None and prompt_embeds is not None:
713
- raise ValueError(
714
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
715
- " only forward one of the two."
716
- )
717
- elif prompt_2 is not None and prompt_embeds is not None:
718
- raise ValueError(
719
- f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
720
- " only forward one of the two."
721
- )
722
- elif prompt is None and prompt_embeds is None:
723
- raise ValueError(
724
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
725
- )
726
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
727
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
728
- elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
729
- raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
730
-
731
- if negative_prompt is not None and negative_prompt_embeds is not None:
732
- raise ValueError(
733
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
734
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
735
- )
736
- elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
737
- raise ValueError(
738
- f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
739
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
740
- )
741
-
742
- if prompt_embeds is not None and negative_prompt_embeds is not None:
743
- if prompt_embeds.shape != negative_prompt_embeds.shape:
744
- raise ValueError(
745
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
746
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
747
- f" {negative_prompt_embeds.shape}."
748
- )
749
-
750
- if prompt_embeds is not None and pooled_prompt_embeds is None:
751
- raise ValueError(
752
- "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
753
- )
754
-
755
- if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
756
- raise ValueError(
757
- "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
758
- )
759
-
760
- def check_conditions(
761
- self,
762
- prompt,
763
- prompt_embeds,
764
- adapter_image,
765
- control_image,
766
- adapter_conditioning_scale,
767
- controlnet_conditioning_scale,
768
- control_guidance_start,
769
- control_guidance_end,
770
- ):
771
- # controlnet checks
772
- if not isinstance(control_guidance_start, (tuple, list)):
773
- control_guidance_start = [control_guidance_start]
774
-
775
- if not isinstance(control_guidance_end, (tuple, list)):
776
- control_guidance_end = [control_guidance_end]
777
-
778
- if len(control_guidance_start) != len(control_guidance_end):
779
- raise ValueError(
780
- f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
781
- )
782
-
783
- if isinstance(self.controlnet, MultiControlNetModel):
784
- if len(control_guidance_start) != len(self.controlnet.nets):
785
- raise ValueError(
786
- f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
787
- )
788
-
789
- for start, end in zip(control_guidance_start, control_guidance_end):
790
- if start >= end:
791
- raise ValueError(
792
-                     f"control guidance start: {start} cannot be larger than or equal to control guidance end: {end}."
793
- )
794
- if start < 0.0:
795
- raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
796
- if end > 1.0:
797
- raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
798
-
799
- # Check controlnet `image`
800
- is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
801
- self.controlnet, torch._dynamo.eval_frame.OptimizedModule
802
- )
803
- if (
804
- isinstance(self.controlnet, ControlNetModel)
805
- or is_compiled
806
- and isinstance(self.controlnet._orig_mod, ControlNetModel)
807
- ):
808
- self.check_image(control_image, prompt, prompt_embeds)
809
- elif (
810
- isinstance(self.controlnet, MultiControlNetModel)
811
- or is_compiled
812
- and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
813
- ):
814
- if not isinstance(control_image, list):
815
- raise TypeError("For multiple controlnets: `control_image` must be type `list`")
816
-
817
- # When `image` is a nested list:
818
- # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
819
- elif any(isinstance(i, list) for i in control_image):
820
-             raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
821
- elif len(control_image) != len(self.controlnet.nets):
822
- raise ValueError(
823
- f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(control_image)} images and {len(self.controlnet.nets)} ControlNets."
824
- )
825
-
826
- for image_ in control_image:
827
- self.check_image(image_, prompt, prompt_embeds)
828
- else:
829
- assert False
830
-
831
- # Check `controlnet_conditioning_scale`
832
- if (
833
- isinstance(self.controlnet, ControlNetModel)
834
- or is_compiled
835
- and isinstance(self.controlnet._orig_mod, ControlNetModel)
836
- ):
837
- if not isinstance(controlnet_conditioning_scale, float):
838
- raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
839
- elif (
840
- isinstance(self.controlnet, MultiControlNetModel)
841
- or is_compiled
842
- and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
843
- ):
844
- if isinstance(controlnet_conditioning_scale, list):
845
- if any(isinstance(i, list) for i in controlnet_conditioning_scale):
846
-                     raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
847
- elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
848
- self.controlnet.nets
849
- ):
850
- raise ValueError(
851
- "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
852
- " the same length as the number of controlnets"
853
- )
854
- else:
855
- assert False
856
-
857
- # adapter checks
858
- if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
859
- self.check_image(adapter_image, prompt, prompt_embeds)
860
- elif (
861
- isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
862
- ):
863
- if not isinstance(adapter_image, list):
864
- raise TypeError("For multiple adapters: `adapter_image` must be type `list`")
865
-
866
- # When `image` is a nested list:
867
- # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
868
- elif any(isinstance(i, list) for i in adapter_image):
869
-             raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
870
- elif len(adapter_image) != len(self.adapter.adapters):
871
- raise ValueError(
872
-                 f"For multiple adapters: `image` must have the same length as the number of adapters, but got {len(adapter_image)} images and {len(self.adapter.adapters)} Adapters."
873
- )
874
-
875
- for image_ in adapter_image:
876
- self.check_image(image_, prompt, prompt_embeds)
877
- else:
878
- assert False
879
-
880
- # Check `adapter_conditioning_scale`
881
- if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
882
- if not isinstance(adapter_conditioning_scale, float):
883
- raise TypeError("For single adapter: `adapter_conditioning_scale` must be type `float`.")
884
- elif (
885
- isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
886
- ):
887
- if isinstance(adapter_conditioning_scale, list):
888
- if any(isinstance(i, list) for i in adapter_conditioning_scale):
889
-                     raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
890
- elif isinstance(adapter_conditioning_scale, list) and len(adapter_conditioning_scale) != len(
891
- self.adapter.adapters
892
- ):
893
- raise ValueError(
894
- "For multiple adapters: When `adapter_conditioning_scale` is specified as `list`, it must have"
895
- " the same length as the number of adapters"
896
- )
897
- else:
898
- assert False
899
-
900
- def prepare_latents(
901
- self,
902
- batch_size,
903
- num_channels_latents,
904
- height,
905
- width,
906
- dtype,
907
- device,
908
- generator,
909
- latents=None,
910
- image=None,
911
- timestep=None,
912
- is_strength_max=True,
913
- add_noise=True,
914
- return_noise=False,
915
- return_image_latents=False,
916
- ):
917
- shape = (
918
- batch_size,
919
- num_channels_latents,
920
- height // self.vae_scale_factor,
921
- width // self.vae_scale_factor,
922
- )
923
- if isinstance(generator, list) and len(generator) != batch_size:
924
- raise ValueError(
925
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
926
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
927
- )
928
-
929
- if (image is None or timestep is None) and not is_strength_max:
930
- raise ValueError(
931
-                 "Since strength < 1, initial latents are to be initialised as a combination of Image + Noise. "
932
- "However, either the image or the noise timestep has not been provided."
933
- )
934
-
935
- if image.shape[1] == 4:
936
- image_latents = image.to(device=device, dtype=dtype)
937
- elif return_image_latents or (latents is None and not is_strength_max):
938
- image = image.to(device=device, dtype=dtype)
939
- image_latents = self._encode_vae_image(image=image, generator=generator)
940
-
941
- image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
942
-
943
- if latents is None and add_noise:
944
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
945
-             # if strength is 1, then initialise the latents to noise, else initialise to image + noise
946
- latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
947
- # if pure noise then scale the initial latents by the Scheduler's init sigma
948
- latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
949
- elif add_noise:
950
- noise = latents.to(device)
951
- latents = noise * self.scheduler.init_noise_sigma
952
- else:
953
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
954
- latents = image_latents.to(device)
955
-
956
- outputs = (latents,)
957
-
958
- if return_noise:
959
- outputs += (noise,)
960
-
961
- if return_image_latents:
962
- outputs += (image_latents,)
963
-
964
- return outputs
965
-
966
- def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
967
- dtype = image.dtype
968
- if self.vae.config.force_upcast:
969
- image = image.float()
970
- self.vae.to(dtype=torch.float32)
971
-
972
- if isinstance(generator, list):
973
- image_latents = [
974
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
975
- for i in range(image.shape[0])
976
- ]
977
- image_latents = torch.cat(image_latents, dim=0)
978
- else:
979
- image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
980
-
981
- if self.vae.config.force_upcast:
982
- self.vae.to(dtype)
983
-
984
- image_latents = image_latents.to(dtype)
985
- image_latents = self.vae.config.scaling_factor * image_latents
986
-
987
- return image_latents
988
-
989
- def prepare_mask_latents(
990
- self,
991
- mask,
992
- masked_image,
993
- batch_size,
994
- height,
995
- width,
996
- dtype,
997
- device,
998
- generator,
999
- do_classifier_free_guidance,
1000
- ):
1001
- # resize the mask to latents shape as we concatenate the mask to the latents
1002
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
1003
- # and half precision
1004
- mask = torch.nn.functional.interpolate(
1005
- mask,
1006
- size=(
1007
- height // self.vae_scale_factor,
1008
- width // self.vae_scale_factor,
1009
- ),
1010
- )
1011
- mask = mask.to(device=device, dtype=dtype)
1012
-
1013
- # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
1014
- if mask.shape[0] < batch_size:
1015
- if not batch_size % mask.shape[0] == 0:
1016
- raise ValueError(
1017
- "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
1018
- f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
1019
- " of masks that you pass is divisible by the total requested batch size."
1020
- )
1021
- mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
1022
-
1023
- mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
1024
-
1025
- masked_image_latents = None
1026
- if masked_image is not None:
1027
- masked_image = masked_image.to(device=device, dtype=dtype)
1028
- masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
1029
- if masked_image_latents.shape[0] < batch_size:
1030
- if not batch_size % masked_image_latents.shape[0] == 0:
1031
- raise ValueError(
1032
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
1033
- f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
1034
- " Make sure the number of images that you pass is divisible by the total requested batch size."
1035
- )
1036
- masked_image_latents = masked_image_latents.repeat(
1037
- batch_size // masked_image_latents.shape[0], 1, 1, 1
1038
- )
1039
-
1040
- masked_image_latents = (
1041
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
1042
- )
1043
-
1044
- # aligning device to prevent device errors when concating it with the latent model input
1045
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
1046
-
1047
- return mask, masked_image_latents
1048
-
1049
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
1050
- def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
1051
- # get the original timestep using init_timestep
1052
- if denoising_start is None:
1053
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
1054
- t_start = max(num_inference_steps - init_timestep, 0)
1055
- else:
1056
- t_start = 0
1057
-
1058
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
1059
-
1060
- # Strength is irrelevant if we directly request a timestep to start at;
1061
- # that is, strength is determined by the denoising_start instead.
1062
- if denoising_start is not None:
1063
- discrete_timestep_cutoff = int(
1064
- round(
1065
- self.scheduler.config.num_train_timesteps
1066
- - (denoising_start * self.scheduler.config.num_train_timesteps)
1067
- )
1068
- )
1069
-
1070
- num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
1071
- if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
1072
- # if the scheduler is a 2nd order scheduler we might have to do +1
1073
- # because `num_inference_steps` might be even given that every timestep
1074
- # (except the highest one) is duplicated. If `num_inference_steps` is even it would
1075
- # mean that we cut the timesteps in the middle of the denoising step
1076
- # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
1077
- # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler
1078
- num_inference_steps = num_inference_steps + 1
1079
-
1080
- # because t_n+1 >= t_n, we slice the timesteps starting from the end
1081
- timesteps = timesteps[-num_inference_steps:]
1082
- return timesteps, num_inference_steps
1083
-
1084
- return timesteps, num_inference_steps - t_start
1085
-
1086
- def _get_add_time_ids(
1087
- self,
1088
- original_size,
1089
- crops_coords_top_left,
1090
- target_size,
1091
- aesthetic_score,
1092
- negative_aesthetic_score,
1093
- dtype,
1094
- text_encoder_projection_dim=None,
1095
- ):
1096
- if self.config.requires_aesthetics_score:
1097
- add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
1098
- add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,))
1099
- else:
1100
- add_time_ids = list(original_size + crops_coords_top_left + target_size)
1101
- add_neg_time_ids = list(original_size + crops_coords_top_left + target_size)
1102
-
1103
- passed_add_embed_dim = (
1104
- self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
1105
- )
1106
- expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
1107
-
1108
- if (
1109
- expected_add_embed_dim > passed_add_embed_dim
1110
- and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
1111
- ):
1112
- raise ValueError(
1113
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
1114
- )
1115
- elif (
1116
- expected_add_embed_dim < passed_add_embed_dim
1117
- and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
1118
- ):
1119
- raise ValueError(
1120
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
1121
- )
1122
- elif expected_add_embed_dim != passed_add_embed_dim:
1123
- raise ValueError(
1124
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
1125
- )
1126
-
1127
- add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
1128
- add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
1129
-
1130
- return add_time_ids, add_neg_time_ids
1131
-
1132
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
1133
- def upcast_vae(self):
1134
- dtype = self.vae.dtype
1135
- self.vae.to(dtype=torch.float32)
1136
- use_torch_2_0_or_xformers = isinstance(
1137
- self.vae.decoder.mid_block.attentions[0].processor,
1138
- (
1139
- AttnProcessor2_0,
1140
- XFormersAttnProcessor,
1141
- LoRAXFormersAttnProcessor,
1142
- LoRAAttnProcessor2_0,
1143
- ),
1144
- )
1145
- # if xformers or torch_2_0 is used attention block does not need
1146
- # to be in float32 which can save lots of memory
1147
- if use_torch_2_0_or_xformers:
1148
- self.vae.post_quant_conv.to(dtype)
1149
- self.vae.decoder.conv_in.to(dtype)
1150
- self.vae.decoder.mid_block.to(dtype)
1151
-
1152
- # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
1153
- def _default_height_width(self, height, width, image):
1154
- # NOTE: It is possible that a list of images have different
1155
- # dimensions for each image, so just checking the first image
1156
- # is not _exactly_ correct, but it is simple.
1157
- while isinstance(image, list):
1158
- image = image[0]
1159
-
1160
- if height is None:
1161
- if isinstance(image, PIL.Image.Image):
1162
- height = image.height
1163
- elif isinstance(image, torch.Tensor):
1164
- height = image.shape[-2]
1165
-
1166
- # round down to nearest multiple of `self.adapter.downscale_factor`
1167
- height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor
1168
-
1169
- if width is None:
1170
- if isinstance(image, PIL.Image.Image):
1171
- width = image.width
1172
- elif isinstance(image, torch.Tensor):
1173
- width = image.shape[-1]
1174
-
1175
- # round down to nearest multiple of `self.adapter.downscale_factor`
1176
- width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor
1177
-
1178
- return height, width
1179
-
1180
- def prepare_control_image(
1181
- self,
1182
- image,
1183
- width,
1184
- height,
1185
- batch_size,
1186
- num_images_per_prompt,
1187
- device,
1188
- dtype,
1189
- do_classifier_free_guidance=False,
1190
- guess_mode=False,
1191
- ):
1192
- image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
1193
- image_batch_size = image.shape[0]
1194
-
1195
- if image_batch_size == 1:
1196
- repeat_by = batch_size
1197
- else:
1198
- # image batch size is the same as prompt batch size
1199
- repeat_by = num_images_per_prompt
1200
-
1201
- image = image.repeat_interleave(repeat_by, dim=0)
1202
-
1203
- image = image.to(device=device, dtype=dtype)
1204
-
1205
- if do_classifier_free_guidance and not guess_mode:
1206
- image = torch.cat([image] * 2)
1207
-
1208
- return image
1209
-
1210
- @torch.no_grad()
1211
- @replace_example_docstring(EXAMPLE_DOC_STRING)
1212
- def __call__(
1213
- self,
1214
- prompt: Optional[Union[str, List[str]]] = None,
1215
- prompt_2: Optional[Union[str, List[str]]] = None,
1216
- image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
1217
- mask_image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
1218
- adapter_image: PipelineImageInput = None,
1219
- control_image: PipelineImageInput = None,
1220
- height: Optional[int] = None,
1221
- width: Optional[int] = None,
1222
- strength: float = 0.9999,
1223
- num_inference_steps: int = 50,
1224
- denoising_start: Optional[float] = None,
1225
- denoising_end: Optional[float] = None,
1226
- guidance_scale: float = 5.0,
1227
- negative_prompt: Optional[Union[str, List[str]]] = None,
1228
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
1229
- num_images_per_prompt: Optional[int] = 1,
1230
- eta: float = 0.0,
1231
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1232
- latents: Optional[Union[torch.Tensor]] = None,
1233
- prompt_embeds: Optional[torch.Tensor] = None,
1234
- negative_prompt_embeds: Optional[torch.Tensor] = None,
1235
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
1236
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
1237
- output_type: Optional[str] = "pil",
1238
- return_dict: bool = True,
1239
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
1240
- callback_steps: int = 1,
1241
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1242
- guidance_rescale: float = 0.0,
1243
- original_size: Optional[Tuple[int, int]] = None,
1244
- crops_coords_top_left: Optional[Tuple[int, int]] = (0, 0),
1245
- target_size: Optional[Tuple[int, int]] = None,
1246
- adapter_conditioning_scale: Optional[Union[float, List[float]]] = 1.0,
1247
- cond_tau: float = 1.0,
1248
- aesthetic_score: float = 6.0,
1249
- negative_aesthetic_score: float = 2.5,
1250
- controlnet_conditioning_scale=1.0,
1251
- guess_mode: bool = False,
1252
- control_guidance_start=0.0,
1253
- control_guidance_end=1.0,
1254
- ):
1255
- r"""
1256
- Function invoked when calling the pipeline for generation.
1257
-
1258
- Args:
1259
- prompt (`str` or `List[str]`, *optional*):
1260
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
1261
- instead.
1262
- prompt_2 (`str` or `List[str]`, *optional*):
1263
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
1264
- used in both text-encoders
1265
- image (`PIL.Image.Image`):
1266
- `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
1267
- be masked out with `mask_image` and repainted according to `prompt`.
1268
- mask_image (`PIL.Image.Image`):
1269
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1270
- repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
1271
- to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
1272
- instead of 3, so the expected shape would be `(B, H, W, 1)`.
1273
- adapter_image (`torch.Tensor`, `PIL.Image.Image`, `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`):
1274
- The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the
1275
- type is specified as `torch.Tensor`, it is passed to Adapter as is. PIL.Image.Image` can also be
1276
- accepted as an image. The control image is automatically resized to fit the output image.
1277
- control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
1278
- `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
1279
- The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
1280
- specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
1281
- accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
1282
- and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
1283
- `init`, images must be passed as a list such that each element of the list can be correctly batched for
1284
- input to a single ControlNet.
1285
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1286
- The height in pixels of the generated image.
1287
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1288
- The width in pixels of the generated image.
1289
- strength (`float`, *optional*, defaults to 0.9999):
1290
- Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
1291
- starting point and more noise is added the higher the `strength`. The number of denoising steps depends
1292
- on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
1293
- process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
1294
- essentially ignores `image`.
1295
- num_inference_steps (`int`, *optional*, defaults to 50):
1296
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1297
- expense of slower inference.
1298
- denoising_start (`float`, *optional*):
1299
- When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
1300
- bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
1301
- it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
1302
- strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
1303
- is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
1304
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1305
- denoising_end (`float`, *optional*):
1306
- When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
1307
- completed before it is intentionally prematurely terminated. As a result, the returned sample will
1308
- still retain a substantial amount of noise as determined by the discrete timesteps selected by the
1309
- scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
1310
- "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1311
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
1312
- guidance_scale (`float`, *optional*, defaults to 5.0):
1313
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1314
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
1315
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1316
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1317
- usually at the expense of lower image quality.
1318
- negative_prompt (`str` or `List[str]`, *optional*):
1319
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
1320
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1321
- less than `1`).
1322
- negative_prompt_2 (`str` or `List[str]`, *optional*):
1323
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
1324
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
1325
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1326
- The number of images to generate per prompt.
1327
- eta (`float`, *optional*, defaults to 0.0):
1328
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1329
- [`schedulers.DDIMScheduler`], will be ignored for others.
1330
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1331
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1332
- to make generation deterministic.
1333
- latents (`torch.Tensor`, *optional*):
1334
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1335
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1336
- tensor will be generated by sampling using the supplied random `generator`.
1337
- prompt_embeds (`torch.Tensor`, *optional*):
1338
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1339
- provided, text embeddings will be generated from `prompt` input argument.
1340
- negative_prompt_embeds (`torch.Tensor`, *optional*):
1341
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1342
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1343
- argument.
1344
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
1345
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1346
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
1347
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
1348
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1349
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1350
- input argument.
1351
- output_type (`str`, *optional*, defaults to `"pil"`):
1352
- The output format of the generated image. Choose between
1353
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1354
- return_dict (`bool`, *optional*, defaults to `True`):
1355
- Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`]
1356
- instead of a plain tuple.
1357
- callback (`Callable`, *optional*):
1358
- A function that will be called every `callback_steps` steps during inference. The function will be
1359
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
1360
- callback_steps (`int`, *optional*, defaults to 1):
1361
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1362
- called at every step.
1363
- cross_attention_kwargs (`dict`, *optional*):
1364
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1365
- `self.processor` in
1366
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1367
- guidance_rescale (`float`, *optional*, defaults to 0.0):
1368
- Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
1369
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
1370
- [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
1371
- Guidance rescale factor should fix overexposure when using zero terminal SNR.
1372
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1373
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1374
- `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
1375
- explained in section 2.2 of
1376
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1377
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1378
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1379
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1380
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1381
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1382
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1383
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
1384
- not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
1385
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1386
- controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1387
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the
1388
- residual in the original unet. If multiple ControlNets are specified in init, you can set the
1389
- corresponding scale as a list.
1390
- adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1391
- The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the
1392
- residual in the original unet. If multiple adapters are specified in init, you can set the
1393
- corresponding scale as a list.
1394
- aesthetic_score (`float`, *optional*, defaults to 6.0):
1395
- Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
1396
- Part of SDXL's micro-conditioning as explained in section 2.2 of
1397
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1398
- negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
1399
- Part of SDXL's micro-conditioning as explained in section 2.2 of
1400
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
1401
- simulate an aesthetic score of the generated image by influencing the negative text condition.
1402
- Examples:
1403
-
1404
- Returns:
1405
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
1406
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
1407
- `tuple`. When returning a tuple, the first element is a list with the generated images.
1408
- """
1409
- # 0. Default height and width to unet
1410
- controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1411
- adapter = self.adapter._orig_mod if is_compiled_module(self.adapter) else self.adapter
1412
- height, width = self._default_height_width(height, width, adapter_image)
1413
- device = self._execution_device
1414
-
1415
- if isinstance(adapter, MultiAdapter):
1416
- adapter_input = []
1417
- for one_image in adapter_image:
1418
- one_image = _preprocess_adapter_image(one_image, height, width)
1419
- one_image = one_image.to(device=device, dtype=adapter.dtype)
1420
- adapter_input.append(one_image)
1421
- else:
1422
- adapter_input = _preprocess_adapter_image(adapter_image, height, width)
1423
- adapter_input = adapter_input.to(device=device, dtype=adapter.dtype)
1424
-
1425
- original_size = original_size or (height, width)
1426
- target_size = target_size or (height, width)
1427
-
1428
- # 0.1 align format for control guidance
1429
- if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1430
- control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1431
- elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1432
- control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1433
- elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1434
- mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1435
- control_guidance_start, control_guidance_end = (
1436
- mult * [control_guidance_start],
1437
- mult * [control_guidance_end],
1438
- )
1439
-
1440
- if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1441
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1442
- if isinstance(adapter, MultiAdapter) and isinstance(adapter_conditioning_scale, float):
1443
- adapter_conditioning_scale = [adapter_conditioning_scale] * len(adapter.nets)
1444
-
1445
- # 1. Check inputs. Raise error if not correct
1446
- self.check_inputs(
1447
- prompt,
1448
- prompt_2,
1449
- height,
1450
- width,
1451
- callback_steps,
1452
- negative_prompt=negative_prompt,
1453
- negative_prompt_2=negative_prompt_2,
1454
- prompt_embeds=prompt_embeds,
1455
- negative_prompt_embeds=negative_prompt_embeds,
1456
- pooled_prompt_embeds=pooled_prompt_embeds,
1457
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1458
- )
1459
-
1460
- self.check_conditions(
1461
- prompt,
1462
- prompt_embeds,
1463
- adapter_image,
1464
- control_image,
1465
- adapter_conditioning_scale,
1466
- controlnet_conditioning_scale,
1467
- control_guidance_start,
1468
- control_guidance_end,
1469
- )
1470
-
1471
- # 2. Define call parameters
1472
- if prompt is not None and isinstance(prompt, str):
1473
- batch_size = 1
1474
- elif prompt is not None and isinstance(prompt, list):
1475
- batch_size = len(prompt)
1476
- else:
1477
- batch_size = prompt_embeds.shape[0]
1478
-
1479
- device = self._execution_device
1480
-
1481
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1482
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1483
- # corresponds to doing no classifier free guidance.
1484
- do_classifier_free_guidance = guidance_scale > 1.0
1485
-
1486
- # 3. Encode input prompt
1487
- (
1488
- prompt_embeds,
1489
- negative_prompt_embeds,
1490
- pooled_prompt_embeds,
1491
- negative_pooled_prompt_embeds,
1492
- ) = self.encode_prompt(
1493
- prompt=prompt,
1494
- prompt_2=prompt_2,
1495
- device=device,
1496
- num_images_per_prompt=num_images_per_prompt,
1497
- do_classifier_free_guidance=do_classifier_free_guidance,
1498
- negative_prompt=negative_prompt,
1499
- negative_prompt_2=negative_prompt_2,
1500
- prompt_embeds=prompt_embeds,
1501
- negative_prompt_embeds=negative_prompt_embeds,
1502
- pooled_prompt_embeds=pooled_prompt_embeds,
1503
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1504
- )
1505
-
1506
- # 4. set timesteps
1507
- def denoising_value_valid(dnv):
1508
- return isinstance(dnv, float) and 0 < dnv < 1
1509
-
1510
- self.scheduler.set_timesteps(num_inference_steps, device=device)
1511
- timesteps, num_inference_steps = self.get_timesteps(
1512
- num_inference_steps,
1513
- strength,
1514
- device,
1515
- denoising_start=denoising_start if denoising_value_valid(denoising_start) else None,
1516
- )
1517
- # check that number of inference steps is not < 1 - as this doesn't make sense
1518
- if num_inference_steps < 1:
1519
- raise ValueError(
1520
- f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
1521
- f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
1522
- )
1523
- # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
1524
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1525
- # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
1526
- is_strength_max = strength == 1.0
1527
-
1528
- # 5. Preprocess mask and image - resizes image and mask w.r.t height and width
1529
- mask, masked_image, init_image = prepare_mask_and_masked_image(
1530
- image, mask_image, height, width, return_image=True
1531
- )
1532
-
1533
- # 6. Prepare latent variables
1534
- num_channels_latents = self.vae.config.latent_channels
1535
- num_channels_unet = self.unet.config.in_channels
1536
- return_image_latents = num_channels_unet == 4
1537
-
1538
- add_noise = denoising_start is None
1539
- latents_outputs = self.prepare_latents(
1540
- batch_size * num_images_per_prompt,
1541
- num_channels_latents,
1542
- height,
1543
- width,
1544
- prompt_embeds.dtype,
1545
- device,
1546
- generator,
1547
- latents,
1548
- image=init_image,
1549
- timestep=latent_timestep,
1550
- is_strength_max=is_strength_max,
1551
- add_noise=add_noise,
1552
- return_noise=True,
1553
- return_image_latents=return_image_latents,
1554
- )
1555
-
1556
- if return_image_latents:
1557
- latents, noise, image_latents = latents_outputs
1558
- else:
1559
- latents, noise = latents_outputs
1560
-
1561
- # 7. Prepare mask latent variables
1562
- mask, masked_image_latents = self.prepare_mask_latents(
1563
- mask,
1564
- masked_image,
1565
- batch_size * num_images_per_prompt,
1566
- height,
1567
- width,
1568
- prompt_embeds.dtype,
1569
- device,
1570
- generator,
1571
- do_classifier_free_guidance,
1572
- )
1573
-
1574
- # 8. Check that sizes of mask, masked image and latents match
1575
- if num_channels_unet == 9:
1576
- # default case for runwayml/stable-diffusion-inpainting
1577
- num_channels_mask = mask.shape[1]
1578
- num_channels_masked_image = masked_image_latents.shape[1]
1579
- if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
1580
- raise ValueError(
1581
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
1582
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
1583
- f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1584
- f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
1585
- " `pipeline.unet` or your `mask_image` or `image` input."
1586
- )
1587
- elif num_channels_unet != 4:
1588
- raise ValueError(
1589
- f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
1590
- )
1591
-
1592
- # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1593
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1594
-
1595
- # 10. Prepare added time ids & embeddings & adapter features
1596
- if isinstance(adapter, MultiAdapter):
1597
- adapter_state = adapter(adapter_input, adapter_conditioning_scale)
1598
- for k, v in enumerate(adapter_state):
1599
- adapter_state[k] = v
1600
- else:
1601
- adapter_state = adapter(adapter_input)
1602
- for k, v in enumerate(adapter_state):
1603
- adapter_state[k] = v * adapter_conditioning_scale
1604
- if num_images_per_prompt > 1:
1605
- for k, v in enumerate(adapter_state):
1606
- adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
1607
- if do_classifier_free_guidance:
1608
- for k, v in enumerate(adapter_state):
1609
- adapter_state[k] = torch.cat([v] * 2, dim=0)
1610
-
1611
- # 10.2 Prepare control images
1612
- if isinstance(controlnet, ControlNetModel):
1613
- control_image = self.prepare_control_image(
1614
- image=control_image,
1615
- width=width,
1616
- height=height,
1617
- batch_size=batch_size * num_images_per_prompt,
1618
- num_images_per_prompt=num_images_per_prompt,
1619
- device=device,
1620
- dtype=controlnet.dtype,
1621
- do_classifier_free_guidance=do_classifier_free_guidance,
1622
- guess_mode=guess_mode,
1623
- )
1624
- elif isinstance(controlnet, MultiControlNetModel):
1625
- control_images = []
1626
-
1627
- for control_image_ in control_image:
1628
- control_image_ = self.prepare_control_image(
1629
- image=control_image_,
1630
- width=width,
1631
- height=height,
1632
- batch_size=batch_size * num_images_per_prompt,
1633
- num_images_per_prompt=num_images_per_prompt,
1634
- device=device,
1635
- dtype=controlnet.dtype,
1636
- do_classifier_free_guidance=do_classifier_free_guidance,
1637
- guess_mode=guess_mode,
1638
- )
1639
-
1640
- control_images.append(control_image_)
1641
-
1642
- control_image = control_images
1643
- else:
1644
- raise ValueError(f"{controlnet.__class__} is not supported.")
1645
-
1646
- # 8.2 Create tensor stating which controlnets to keep
1647
- controlnet_keep = []
1648
- for i in range(len(timesteps)):
1649
- keeps = [
1650
- 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1651
- for s, e in zip(control_guidance_start, control_guidance_end)
1652
- ]
1653
- if isinstance(self.controlnet, MultiControlNetModel):
1654
- controlnet_keep.append(keeps)
1655
- else:
1656
- controlnet_keep.append(keeps[0])
1657
- # ----------------------------------------------------------------
1658
-
1659
- add_text_embeds = pooled_prompt_embeds
1660
- if self.text_encoder_2 is None:
1661
- text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1662
- else:
1663
- text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1664
-
1665
- add_time_ids, add_neg_time_ids = self._get_add_time_ids(
1666
- original_size,
1667
- crops_coords_top_left,
1668
- target_size,
1669
- aesthetic_score,
1670
- negative_aesthetic_score,
1671
- dtype=prompt_embeds.dtype,
1672
- text_encoder_projection_dim=text_encoder_projection_dim,
1673
- )
1674
- add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1675
-
1676
- if do_classifier_free_guidance:
1677
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1678
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1679
- add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1680
- add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
1681
-
1682
- prompt_embeds = prompt_embeds.to(device)
1683
- add_text_embeds = add_text_embeds.to(device)
1684
- add_time_ids = add_time_ids.to(device)
1685
-
1686
- # 11. Denoising loop
1687
- num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1688
-
1689
- # 11.1 Apply denoising_end
1690
- if (
1691
- denoising_end is not None
1692
- and denoising_start is not None
1693
- and denoising_value_valid(denoising_end)
1694
- and denoising_value_valid(denoising_start)
1695
- and denoising_start >= denoising_end
1696
- ):
1697
- raise ValueError(
1698
- f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
1699
- + f" {denoising_end} when using type float."
1700
- )
1701
- elif denoising_end is not None and denoising_value_valid(denoising_end):
1702
- discrete_timestep_cutoff = int(
1703
- round(
1704
- self.scheduler.config.num_train_timesteps
1705
- - (denoising_end * self.scheduler.config.num_train_timesteps)
1706
- )
1707
- )
1708
- num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1709
- timesteps = timesteps[:num_inference_steps]
1710
-
1711
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1712
- for i, t in enumerate(timesteps):
1713
- # expand the latents if we are doing classifier free guidance
1714
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1715
-
1716
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1717
-
1718
- if num_channels_unet == 9:
1719
- latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
1720
-
1721
- # predict the noise residual
1722
- added_cond_kwargs = {
1723
- "text_embeds": add_text_embeds,
1724
- "time_ids": add_time_ids,
1725
- }
1726
-
1727
- if i < int(num_inference_steps * cond_tau):
1728
- down_block_additional_residuals = [state.clone() for state in adapter_state]
1729
- else:
1730
- down_block_additional_residuals = None
1731
-
1732
- # ----------- ControlNet
1733
-
1734
- # expand the latents if we are doing classifier free guidance
1735
- latent_model_input_controlnet = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1736
-
1737
- # concat latents, mask, masked_image_latents in the channel dimension
1738
- latent_model_input_controlnet = self.scheduler.scale_model_input(latent_model_input_controlnet, t)
1739
-
1740
- # controlnet(s) inference
1741
- if guess_mode and do_classifier_free_guidance:
1742
- # Infer ControlNet only for the conditional batch.
1743
- control_model_input = latents
1744
- control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1745
- controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1746
- controlnet_added_cond_kwargs = {
1747
- "text_embeds": add_text_embeds.chunk(2)[1],
1748
- "time_ids": add_time_ids.chunk(2)[1],
1749
- }
1750
- else:
1751
- control_model_input = latent_model_input_controlnet
1752
- controlnet_prompt_embeds = prompt_embeds
1753
- controlnet_added_cond_kwargs = added_cond_kwargs
1754
-
1755
- if isinstance(controlnet_keep[i], list):
1756
- cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1757
- else:
1758
- controlnet_cond_scale = controlnet_conditioning_scale
1759
- if isinstance(controlnet_cond_scale, list):
1760
- controlnet_cond_scale = controlnet_cond_scale[0]
1761
- cond_scale = controlnet_cond_scale * controlnet_keep[i]
1762
- down_block_res_samples, mid_block_res_sample = self.controlnet(
1763
- control_model_input,
1764
- t,
1765
- encoder_hidden_states=controlnet_prompt_embeds,
1766
- controlnet_cond=control_image,
1767
- conditioning_scale=cond_scale,
1768
- guess_mode=guess_mode,
1769
- added_cond_kwargs=controlnet_added_cond_kwargs,
1770
- return_dict=False,
1771
- )
1772
-
1773
- noise_pred = self.unet(
1774
- latent_model_input,
1775
- t,
1776
- encoder_hidden_states=prompt_embeds,
1777
- cross_attention_kwargs=cross_attention_kwargs,
1778
- added_cond_kwargs=added_cond_kwargs,
1779
- return_dict=False,
1780
- down_intrablock_additional_residuals=down_block_additional_residuals, # t2iadapter
1781
- down_block_additional_residuals=down_block_res_samples, # controlnet
1782
- mid_block_additional_residual=mid_block_res_sample, # controlnet
1783
- )[0]
1784
-
1785
- # perform guidance
1786
- if do_classifier_free_guidance:
1787
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1788
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1789
-
1790
- if do_classifier_free_guidance and guidance_rescale > 0.0:
1791
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1792
- noise_pred = rescale_noise_cfg(
1793
- noise_pred,
1794
- noise_pred_text,
1795
- guidance_rescale=guidance_rescale,
1796
- )
1797
-
1798
- # compute the previous noisy sample x_t -> x_t-1
1799
- latents = self.scheduler.step(
1800
- noise_pred,
1801
- t,
1802
- latents,
1803
- **extra_step_kwargs,
1804
- return_dict=False,
1805
- )[0]
1806
-
1807
- if num_channels_unet == 4:
1808
- init_latents_proper = image_latents
1809
- if do_classifier_free_guidance:
1810
- init_mask, _ = mask.chunk(2)
1811
- else:
1812
- init_mask = mask
1813
-
1814
- if i < len(timesteps) - 1:
1815
- noise_timestep = timesteps[i + 1]
1816
- init_latents_proper = self.scheduler.add_noise(
1817
- init_latents_proper,
1818
- noise,
1819
- torch.tensor([noise_timestep]),
1820
- )
1821
-
1822
- latents = (1 - init_mask) * init_latents_proper + init_mask * latents
1823
-
1824
- # call the callback, if provided
1825
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1826
- progress_bar.update()
1827
- if callback is not None and i % callback_steps == 0:
1828
- callback(i, t, latents)
1829
-
1830
- # make sure the VAE is in float32 mode, as it overflows in float16
1831
- if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
1832
- self.upcast_vae()
1833
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1834
-
1835
- if output_type != "latent":
1836
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1837
- else:
1838
- image = latents
1839
- return StableDiffusionXLPipelineOutput(images=image)
1840
-
1841
- image = self.image_processor.postprocess(image, output_type=output_type)
1842
-
1843
- # Offload last model to CPU
1844
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1845
- self.final_offload_hook.offload()
1846
-
1847
- if not return_dict:
1848
- return (image,)
1849
-
1850
- return StableDiffusionXLPipelineOutput(images=image)
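For readers following the deleted `__call__` above, a hedged usage sketch of how such a combined T2I-Adapter + ControlNet inpainting pipeline would typically be driven. The checkpoint ids, the `custom_pipeline` identifier, and the image URLs are assumptions for illustration only; the keyword arguments mirror the signature shown above:
```py
import torch
from diffusers import ControlNetModel, DiffusionPipeline, T2IAdapter
from diffusers.utils import load_image

# Assumed checkpoints: any SDXL ControlNet / T2I-Adapter pair trained on the same conditioning works.
controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16)
adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16)

# The file was shipped as a community pipeline, so it would be loaded through `custom_pipeline`
# (the exact identifier is assumed here).
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    adapter=adapter,
    custom_pipeline="pipeline_stable_diffusion_xl_controlnet_adapter_inpaint",
    torch_dtype=torch.float16,
).to("cuda")

image = load_image("https://example.com/input.png")   # image to inpaint (placeholder URL)
mask = load_image("https://example.com/mask.png")     # white pixels are repainted, black are kept
canny = load_image("https://example.com/canny.png")   # conditioning image for both adapter and controlnet

result = pipe(
    prompt="a photo of a red brick wall",
    image=image,
    mask_image=mask,
    adapter_image=canny,
    control_image=canny,
    num_inference_steps=50,
    adapter_conditioning_scale=1.0,
    controlnet_conditioning_scale=1.0,
).images[0]
result.save("out.png")
```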
 
pipeline_stable_diffusion_xl_differential_img2img.py DELETED
@@ -1,1470 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
-
18
- import numpy as np
19
- import PIL.Image
20
- import torch
21
- import torchvision
22
- from transformers import (
23
- CLIPImageProcessor,
24
- CLIPTextModel,
25
- CLIPTextModelWithProjection,
26
- CLIPTokenizer,
27
- CLIPVisionModelWithProjection,
28
- )
29
-
30
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
31
- from diffusers.loaders import (
32
- FromSingleFileMixin,
33
- IPAdapterMixin,
34
- StableDiffusionXLLoraLoaderMixin,
35
- TextualInversionLoaderMixin,
36
- )
37
- from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
38
- from diffusers.models.attention_processor import (
39
- AttnProcessor2_0,
40
- LoRAAttnProcessor2_0,
41
- LoRAXFormersAttnProcessor,
42
- XFormersAttnProcessor,
43
- )
44
- from diffusers.models.lora import adjust_lora_scale_text_encoder
45
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
46
- from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
47
- from diffusers.schedulers import KarrasDiffusionSchedulers
48
- from diffusers.utils import (
49
- USE_PEFT_BACKEND,
50
- deprecate,
51
- is_invisible_watermark_available,
52
- is_torch_xla_available,
53
- logging,
54
- replace_example_docstring,
55
- scale_lora_layers,
56
- unscale_lora_layers,
57
- )
58
- from diffusers.utils.torch_utils import randn_tensor
59
-
60
-
61
- if is_invisible_watermark_available():
62
- from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
63
-
64
- if is_torch_xla_available():
65
- import torch_xla.core.xla_model as xm
66
-
67
- XLA_AVAILABLE = True
68
- else:
69
- XLA_AVAILABLE = False
70
-
71
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
72
-
73
- EXAMPLE_DOC_STRING = """
74
- Examples:
75
- ```py
76
- >>> import torch
77
- >>> from diffusers import StableDiffusionXLImg2ImgPipeline
78
- >>> from diffusers.utils import load_image
79
-
80
- >>> pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
81
- ... "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
82
- ... )
83
- >>> pipe = pipe.to("cuda")
84
- >>> url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
85
-
86
- >>> init_image = load_image(url).convert("RGB")
87
- >>> prompt = "a photo of an astronaut riding a horse on mars"
88
- >>> image = pipe(prompt, image=init_image).images[0]
89
- ```
90
- """
91
-
92
-
93
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
94
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
95
- """
96
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
97
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
98
- """
99
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
100
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
101
- # rescale the results from guidance (fixes overexposure)
102
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
103
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
104
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
105
- return noise_cfg
106
-
107
-
108
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
109
- def retrieve_latents(
110
- encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
111
- ):
112
- if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
113
- return encoder_output.latent_dist.sample(generator)
114
- elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
115
- return encoder_output.latent_dist.mode()
116
- elif hasattr(encoder_output, "latents"):
117
- return encoder_output.latents
118
- else:
119
- raise AttributeError("Could not access latents of provided encoder_output")
120
-
121
-
122
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
123
- def retrieve_timesteps(
124
- scheduler,
125
- num_inference_steps: Optional[int] = None,
126
- device: Optional[Union[str, torch.device]] = None,
127
- timesteps: Optional[List[int]] = None,
128
- **kwargs,
129
- ):
130
- """
131
- Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
132
- custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
133
-
134
- Args:
135
- scheduler (`SchedulerMixin`):
136
- The scheduler to get timesteps from.
137
- num_inference_steps (`int`):
138
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
139
- `timesteps` must be `None`.
140
- device (`str` or `torch.device`, *optional*):
141
- The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
142
- timesteps (`List[int]`, *optional*):
143
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
144
- timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
145
- must be `None`.
146
-
147
- Returns:
148
- `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
149
- second element is the number of inference steps.
150
- """
151
- if timesteps is not None:
152
- accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
153
- if not accepts_timesteps:
154
- raise ValueError(
155
- f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
156
- f" timestep schedules. Please check whether you are using the correct scheduler."
157
- )
158
- scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
159
- timesteps = scheduler.timesteps
160
- num_inference_steps = len(timesteps)
161
- else:
162
- scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
163
- timesteps = scheduler.timesteps
164
- return timesteps, num_inference_steps
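A minimal sketch of how the helper above behaves (the scheduler class and repo id are arbitrary examples): it forwards either `num_inference_steps` or a custom `timesteps` list to the scheduler, never both:
```py
import inspect

from diffusers import EulerDiscreteScheduler

# `retrieve_timesteps` refers to the helper defined above.
scheduler = EulerDiscreteScheduler.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler"
)

# Default spacing: only `num_inference_steps` is forwarded to `scheduler.set_timesteps`.
timesteps, num_steps = retrieve_timesteps(scheduler, num_inference_steps=30)

# Custom spacing is only allowed when the scheduler's `set_timesteps` exposes a `timesteps`
# argument -- the same introspection check the helper performs before forwarding the list.
if "timesteps" in inspect.signature(scheduler.set_timesteps).parameters:
    timesteps, num_steps = retrieve_timesteps(scheduler, timesteps=[900, 700, 500, 300, 100])
```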
165
-
166
-
167
- class StableDiffusionXLDifferentialImg2ImgPipeline(
168
- DiffusionPipeline,
169
- StableDiffusionMixin,
170
- TextualInversionLoaderMixin,
171
- FromSingleFileMixin,
172
- StableDiffusionXLLoraLoaderMixin,
173
- IPAdapterMixin,
174
- ):
175
- r"""
176
- Pipeline for differential image-to-image generation using Stable Diffusion XL.
177
-
178
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
179
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
180
-
181
- In addition the pipeline inherits the following loading methods:
182
- - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
183
- - *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
184
- - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
185
-
186
- as well as the following saving methods:
187
- - *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
188
-
189
- Args:
190
- vae ([`AutoencoderKL`]):
191
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
192
- text_encoder ([`CLIPTextModel`]):
193
- Frozen text-encoder. Stable Diffusion XL uses the text portion of
194
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
195
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
196
- text_encoder_2 ([`CLIPTextModelWithProjection`]):
197
- Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
198
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
199
- specifically the
200
- [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
201
- variant.
202
- tokenizer (`CLIPTokenizer`):
203
- Tokenizer of class
204
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
205
- tokenizer_2 (`CLIPTokenizer`):
206
- Second Tokenizer of class
207
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
208
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
209
- scheduler ([`SchedulerMixin`]):
210
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
211
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
212
- """
213
-
214
- model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
215
- _optional_components = [
216
- "tokenizer",
217
- "tokenizer_2",
218
- "text_encoder",
219
- "text_encoder_2",
220
- "image_encoder",
221
- "feature_extractor",
222
- ]
223
- _callback_tensor_inputs = [
224
- "latents",
225
- "prompt_embeds",
226
- "negative_prompt_embeds",
227
- "add_text_embeds",
228
- "add_time_ids",
229
- "negative_pooled_prompt_embeds",
230
- "add_neg_time_ids",
231
- ]
232
-
233
- def __init__(
234
- self,
235
- vae: AutoencoderKL,
236
- text_encoder: CLIPTextModel,
237
- text_encoder_2: CLIPTextModelWithProjection,
238
- tokenizer: CLIPTokenizer,
239
- tokenizer_2: CLIPTokenizer,
240
- unet: UNet2DConditionModel,
241
- scheduler: KarrasDiffusionSchedulers,
242
- image_encoder: CLIPVisionModelWithProjection = None,
243
- feature_extractor: CLIPImageProcessor = None,
244
- requires_aesthetics_score: bool = False,
245
- force_zeros_for_empty_prompt: bool = True,
246
- add_watermarker: Optional[bool] = None,
247
- ):
248
- super().__init__()
249
-
250
- self.register_modules(
251
- vae=vae,
252
- text_encoder=text_encoder,
253
- text_encoder_2=text_encoder_2,
254
- tokenizer=tokenizer,
255
- tokenizer_2=tokenizer_2,
256
- unet=unet,
257
- image_encoder=image_encoder,
258
- feature_extractor=feature_extractor,
259
- scheduler=scheduler,
260
- )
261
- self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
262
- self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
263
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
264
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
265
-
266
- add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
267
-
268
- if add_watermarker:
269
- self.watermark = StableDiffusionXLWatermarker()
270
- else:
271
- self.watermark = None
272
-
273
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
274
- def encode_prompt(
275
- self,
276
- prompt: str,
277
- prompt_2: Optional[str] = None,
278
- device: Optional[torch.device] = None,
279
- num_images_per_prompt: int = 1,
280
- do_classifier_free_guidance: bool = True,
281
- negative_prompt: Optional[str] = None,
282
- negative_prompt_2: Optional[str] = None,
283
- prompt_embeds: Optional[torch.Tensor] = None,
284
- negative_prompt_embeds: Optional[torch.Tensor] = None,
285
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
286
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
287
- lora_scale: Optional[float] = None,
288
- clip_skip: Optional[int] = None,
289
- ):
290
- r"""
291
- Encodes the prompt into text encoder hidden states.
292
-
293
- Args:
294
- prompt (`str` or `List[str]`, *optional*):
295
- prompt to be encoded
296
- prompt_2 (`str` or `List[str]`, *optional*):
297
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
298
- used in both text-encoders
299
- device: (`torch.device`):
300
- torch device
301
- num_images_per_prompt (`int`):
302
- number of images that should be generated per prompt
303
- do_classifier_free_guidance (`bool`):
304
- whether to use classifier free guidance or not
305
- negative_prompt (`str` or `List[str]`, *optional*):
306
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
307
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
308
- less than `1`).
309
- negative_prompt_2 (`str` or `List[str]`, *optional*):
310
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
311
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
312
- prompt_embeds (`torch.Tensor`, *optional*):
313
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
314
- provided, text embeddings will be generated from `prompt` input argument.
315
- negative_prompt_embeds (`torch.Tensor`, *optional*):
316
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
317
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
318
- argument.
319
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
320
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
321
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
322
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
323
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
324
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
325
- input argument.
326
- lora_scale (`float`, *optional*):
327
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
328
- clip_skip (`int`, *optional*):
329
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
330
- the output of the pre-final layer will be used for computing the prompt embeddings.
331
- """
332
- device = device or self._execution_device
333
-
334
- # set lora scale so that monkey patched LoRA
335
- # function of text encoder can correctly access it
336
- if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
337
- self._lora_scale = lora_scale
338
-
339
- # dynamically adjust the LoRA scale
340
- if self.text_encoder is not None:
341
- if not USE_PEFT_BACKEND:
342
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
343
- else:
344
- scale_lora_layers(self.text_encoder, lora_scale)
345
-
346
- if self.text_encoder_2 is not None:
347
- if not USE_PEFT_BACKEND:
348
- adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
349
- else:
350
- scale_lora_layers(self.text_encoder_2, lora_scale)
351
-
352
- prompt = [prompt] if isinstance(prompt, str) else prompt
353
-
354
- if prompt is not None:
355
- batch_size = len(prompt)
356
- else:
357
- batch_size = prompt_embeds.shape[0]
358
-
359
- # Define tokenizers and text encoders
360
- tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
361
- text_encoders = (
362
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
363
- )
364
-
365
- if prompt_embeds is None:
366
- prompt_2 = prompt_2 or prompt
367
- prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
368
-
369
- # textual inversion: process multi-vector tokens if necessary
370
- prompt_embeds_list = []
371
- prompts = [prompt, prompt_2]
372
- for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
373
- if isinstance(self, TextualInversionLoaderMixin):
374
- prompt = self.maybe_convert_prompt(prompt, tokenizer)
375
-
376
- text_inputs = tokenizer(
377
- prompt,
378
- padding="max_length",
379
- max_length=tokenizer.model_max_length,
380
- truncation=True,
381
- return_tensors="pt",
382
- )
383
-
384
- text_input_ids = text_inputs.input_ids
385
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
386
-
387
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
388
- text_input_ids, untruncated_ids
389
- ):
390
- removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
391
- logger.warning(
392
- "The following part of your input was truncated because CLIP can only handle sequences up to"
393
- f" {tokenizer.model_max_length} tokens: {removed_text}"
394
- )
395
-
396
- prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
397
-
398
- # We are only ALWAYS interested in the pooled output of the final text encoder
399
- pooled_prompt_embeds = prompt_embeds[0]
400
- if clip_skip is None:
401
- prompt_embeds = prompt_embeds.hidden_states[-2]
402
- else:
403
- # "2" because SDXL always indexes from the penultimate layer.
404
- prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
405
-
406
- prompt_embeds_list.append(prompt_embeds)
407
-
408
- prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
409
-
410
- # get unconditional embeddings for classifier free guidance
411
- zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
412
- if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
413
- negative_prompt_embeds = torch.zeros_like(prompt_embeds)
414
- negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
415
- elif do_classifier_free_guidance and negative_prompt_embeds is None:
416
- negative_prompt = negative_prompt or ""
417
- negative_prompt_2 = negative_prompt_2 or negative_prompt
418
-
419
- # normalize str to list
420
- negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
421
- negative_prompt_2 = (
422
- batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
423
- )
424
-
425
- uncond_tokens: List[str]
426
- if prompt is not None and type(prompt) is not type(negative_prompt):
427
- raise TypeError(
428
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
429
- f" {type(prompt)}."
430
- )
431
- elif batch_size != len(negative_prompt):
432
- raise ValueError(
433
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
434
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
435
- " the batch size of `prompt`."
436
- )
437
- else:
438
- uncond_tokens = [negative_prompt, negative_prompt_2]
439
-
440
- negative_prompt_embeds_list = []
441
- for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
442
- if isinstance(self, TextualInversionLoaderMixin):
443
- negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
444
-
445
- max_length = prompt_embeds.shape[1]
446
- uncond_input = tokenizer(
447
- negative_prompt,
448
- padding="max_length",
449
- max_length=max_length,
450
- truncation=True,
451
- return_tensors="pt",
452
- )
453
-
454
- negative_prompt_embeds = text_encoder(
455
- uncond_input.input_ids.to(device),
456
- output_hidden_states=True,
457
- )
458
- # The pooled output is always taken from the final text encoder
459
- negative_pooled_prompt_embeds = negative_prompt_embeds[0]
460
- negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
461
-
462
- negative_prompt_embeds_list.append(negative_prompt_embeds)
463
-
464
- negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
465
-
466
- if self.text_encoder_2 is not None:
467
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
468
- else:
469
- prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
470
-
471
- bs_embed, seq_len, _ = prompt_embeds.shape
472
- # duplicate text embeddings for each generation per prompt, using mps friendly method
473
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
474
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
475
-
476
- if do_classifier_free_guidance:
477
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
478
- seq_len = negative_prompt_embeds.shape[1]
479
-
480
- if self.text_encoder_2 is not None:
481
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
482
- else:
483
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
484
-
485
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
486
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
487
-
488
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
489
- bs_embed * num_images_per_prompt, -1
490
- )
491
- if do_classifier_free_guidance:
492
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
493
- bs_embed * num_images_per_prompt, -1
494
- )
495
-
496
- if self.text_encoder is not None:
497
- if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
498
- # Retrieve the original scale by scaling back the LoRA layers
499
- unscale_lora_layers(self.text_encoder, lora_scale)
500
-
501
- if self.text_encoder_2 is not None:
502
- if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
503
- # Retrieve the original scale by scaling back the LoRA layers
504
- unscale_lora_layers(self.text_encoder_2, lora_scale)
505
-
506
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
507
-
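For orientation, `encode_prompt` above concatenates the hidden states from the two SDXL text encoders along the last dimension. A minimal shape-only sketch follows; the encoder widths 768 and 1280 are assumptions for illustration and are not stated in this diff:

# Shape-only sketch of the per-encoder concatenation done in encode_prompt above.
# The widths 768 and 1280 are assumed typical SDXL text-encoder hidden sizes.
import torch

prompt_embeds_list = [torch.randn(1, 77, 768), torch.randn(1, 77, 1280)]
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
print(prompt_embeds.shape)  # torch.Size([1, 77, 2048])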
508
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
509
- def prepare_extra_step_kwargs(self, generator, eta):
510
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
511
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
512
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
513
- # and should be between [0, 1]
514
-
515
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
516
- extra_step_kwargs = {}
517
- if accepts_eta:
518
- extra_step_kwargs["eta"] = eta
519
-
520
- # check if the scheduler accepts generator
521
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
522
- if accepts_generator:
523
- extra_step_kwargs["generator"] = generator
524
- return extra_step_kwargs
525
-
526
- def check_inputs(
527
- self,
528
- prompt,
529
- prompt_2,
530
- strength,
531
- num_inference_steps,
532
- callback_steps,
533
- negative_prompt=None,
534
- negative_prompt_2=None,
535
- prompt_embeds=None,
536
- negative_prompt_embeds=None,
537
- ip_adapter_image=None,
538
- ip_adapter_image_embeds=None,
539
- callback_on_step_end_tensor_inputs=None,
540
- ):
541
- if strength < 0 or strength > 1:
542
- raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
543
- if num_inference_steps is None:
544
- raise ValueError("`num_inference_steps` cannot be None.")
545
- elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
546
- raise ValueError(
547
- f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
548
- f" {type(num_inference_steps)}."
549
- )
550
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
551
- raise ValueError(
552
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
553
- f" {type(callback_steps)}."
554
- )
555
-
556
- if callback_on_step_end_tensor_inputs is not None and not all(
557
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
558
- ):
559
- raise ValueError(
560
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
561
- )
562
-
563
- if prompt is not None and prompt_embeds is not None:
564
- raise ValueError(
565
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
566
- " only forward one of the two."
567
- )
568
- elif prompt_2 is not None and prompt_embeds is not None:
569
- raise ValueError(
570
- f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
571
- " only forward one of the two."
572
- )
573
- elif prompt is None and prompt_embeds is None:
574
- raise ValueError(
575
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
576
- )
577
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
578
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
579
- elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
580
- raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
581
-
582
- if negative_prompt is not None and negative_prompt_embeds is not None:
583
- raise ValueError(
584
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
585
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
586
- )
587
- elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
588
- raise ValueError(
589
- f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
590
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
591
- )
592
-
593
- if prompt_embeds is not None and negative_prompt_embeds is not None:
594
- if prompt_embeds.shape != negative_prompt_embeds.shape:
595
- raise ValueError(
596
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
597
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
598
- f" {negative_prompt_embeds.shape}."
599
- )
600
-
601
- if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
602
- raise ValueError(
603
- "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
604
- )
605
-
606
- if ip_adapter_image_embeds is not None:
607
- if not isinstance(ip_adapter_image_embeds, list):
608
- raise ValueError(
609
- f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
610
- )
611
- elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
612
- raise ValueError(
613
- f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
614
- )
615
-
616
- def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
617
- # get the original timestep using init_timestep
618
- if denoising_start is None:
619
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
620
- t_start = max(num_inference_steps - init_timestep, 0)
621
- else:
622
- t_start = 0
623
-
624
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
625
-
626
- # Strength is irrelevant if we directly request a timestep to start at;
627
- # that is, strength is determined by the denoising_start instead.
628
- if denoising_start is not None:
629
- discrete_timestep_cutoff = int(
630
- round(
631
- self.scheduler.config.num_train_timesteps
632
- - (denoising_start * self.scheduler.config.num_train_timesteps)
633
- )
634
- )
635
-
636
- num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
637
- if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
638
- # if the scheduler is a 2nd order scheduler we might have to do +1
639
- # because `num_inference_steps` might be even given that every timestep
640
- # (except the highest one) is duplicated. If `num_inference_steps` is even it would
641
- # mean that we cut the timesteps in the middle of the denoising step
642
- # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
643
- # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
644
- num_inference_steps = num_inference_steps + 1
645
-
646
- # because t_n+1 >= t_n, we slice the timesteps starting from the end
647
- timesteps = timesteps[-num_inference_steps:]
648
- return timesteps, num_inference_steps
649
-
650
- return timesteps, num_inference_steps - t_start
651
-
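To make the `strength`-to-steps mapping in `get_timesteps` above concrete, here is a standalone sketch; the DDIM scheduler and the 50/0.3 values are arbitrary choices for illustration, not taken from this diff:

# Illustrative only: how strength selects the tail of the timestep schedule
# when denoising_start is None, mirroring get_timesteps above.
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)
num_inference_steps, strength = 50, 0.3

scheduler.set_timesteps(num_inference_steps)
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 15
t_start = max(num_inference_steps - init_timestep, 0)                          # 35
timesteps = scheduler.timesteps[t_start * scheduler.order :]

print(len(timesteps))  # 15 -> only the last 15 timesteps are actually denoised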
652
- def prepare_latents(
653
- self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
654
- ):
655
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
656
- raise ValueError(
657
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
658
- )
659
-
660
- # Offload text encoder if `enable_model_cpu_offload` was enabled
661
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
662
- self.text_encoder_2.to("cpu")
663
- torch.cuda.empty_cache()
664
-
665
- image = image.to(device=device, dtype=dtype)
666
-
667
- batch_size = batch_size * num_images_per_prompt
668
-
669
- if image.shape[1] == 4:
670
- init_latents = image
671
-
672
- else:
673
- # make sure the VAE is in float32 mode, as it overflows in float16
674
- if self.vae.config.force_upcast:
675
- image = image.float()
676
- self.vae.to(dtype=torch.float32)
677
-
678
- if isinstance(generator, list) and len(generator) != batch_size:
679
- raise ValueError(
680
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
681
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
682
- )
683
-
684
- elif isinstance(generator, list):
685
- init_latents = [
686
- retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
687
- for i in range(batch_size)
688
- ]
689
- init_latents = torch.cat(init_latents, dim=0)
690
- else:
691
- init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
692
-
693
- if self.vae.config.force_upcast:
694
- self.vae.to(dtype)
695
-
696
- init_latents = init_latents.to(dtype)
697
- init_latents = self.vae.config.scaling_factor * init_latents
698
-
699
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
700
- # expand init_latents for batch_size
701
- additional_image_per_prompt = batch_size // init_latents.shape[0]
702
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
703
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
704
- raise ValueError(
705
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
706
- )
707
- else:
708
- init_latents = torch.cat([init_latents], dim=0)
709
-
710
- if add_noise:
711
- shape = init_latents.shape
712
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
713
- # get latents
714
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
715
-
716
- latents = init_latents
717
-
718
- return latents
719
-
720
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
721
- def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
722
- dtype = next(self.image_encoder.parameters()).dtype
723
-
724
- if not isinstance(image, torch.Tensor):
725
- image = self.feature_extractor(image, return_tensors="pt").pixel_values
726
-
727
- image = image.to(device=device, dtype=dtype)
728
- if output_hidden_states:
729
- image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
730
- image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
731
- uncond_image_enc_hidden_states = self.image_encoder(
732
- torch.zeros_like(image), output_hidden_states=True
733
- ).hidden_states[-2]
734
- uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
735
- num_images_per_prompt, dim=0
736
- )
737
- return image_enc_hidden_states, uncond_image_enc_hidden_states
738
- else:
739
- image_embeds = self.image_encoder(image).image_embeds
740
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
741
- uncond_image_embeds = torch.zeros_like(image_embeds)
742
-
743
- return image_embeds, uncond_image_embeds
744
-
745
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
746
- def prepare_ip_adapter_image_embeds(
747
- self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
748
- ):
749
- if ip_adapter_image_embeds is None:
750
- if not isinstance(ip_adapter_image, list):
751
- ip_adapter_image = [ip_adapter_image]
752
-
753
- if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
754
- raise ValueError(
755
- f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
756
- )
757
-
758
- image_embeds = []
759
- for single_ip_adapter_image, image_proj_layer in zip(
760
- ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
761
- ):
762
- output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
763
- single_image_embeds, single_negative_image_embeds = self.encode_image(
764
- single_ip_adapter_image, device, 1, output_hidden_state
765
- )
766
- single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
767
- single_negative_image_embeds = torch.stack(
768
- [single_negative_image_embeds] * num_images_per_prompt, dim=0
769
- )
770
-
771
- if do_classifier_free_guidance:
772
- single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
773
- single_image_embeds = single_image_embeds.to(device)
774
-
775
- image_embeds.append(single_image_embeds)
776
- else:
777
- repeat_dims = [1]
778
- image_embeds = []
779
- for single_image_embeds in ip_adapter_image_embeds:
780
- if do_classifier_free_guidance:
781
- single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
782
- single_image_embeds = single_image_embeds.repeat(
783
- num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
784
- )
785
- single_negative_image_embeds = single_negative_image_embeds.repeat(
786
- num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
787
- )
788
- single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
789
- else:
790
- single_image_embeds = single_image_embeds.repeat(
791
- num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
792
- )
793
- image_embeds.append(single_image_embeds)
794
-
795
- return image_embeds
796
-
797
- def _get_add_time_ids(
798
- self,
799
- original_size,
800
- crops_coords_top_left,
801
- target_size,
802
- aesthetic_score,
803
- negative_aesthetic_score,
804
- negative_original_size,
805
- negative_crops_coords_top_left,
806
- negative_target_size,
807
- dtype,
808
- text_encoder_projection_dim=None,
809
- ):
810
- if self.config.requires_aesthetics_score:
811
- add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
812
- add_neg_time_ids = list(
813
- negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
814
- )
815
- else:
816
- add_time_ids = list(original_size + crops_coords_top_left + target_size)
817
- add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)
818
-
819
- passed_add_embed_dim = (
820
- self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
821
- )
822
- expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
823
-
824
- if (
825
- expected_add_embed_dim > passed_add_embed_dim
826
- and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
827
- ):
828
- raise ValueError(
829
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} are correctly used by the model."
830
- )
831
- elif (
832
- expected_add_embed_dim < passed_add_embed_dim
833
- and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
834
- ):
835
- raise ValueError(
836
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
837
- )
838
- elif expected_add_embed_dim != passed_add_embed_dim:
839
- raise ValueError(
840
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
841
- )
842
-
843
- add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
844
- add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
845
-
846
- return add_time_ids, add_neg_time_ids
847
-
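As a quick reference, the flat vectors built by `_get_add_time_ids` in the aesthetic-score branch look like this; the sizes and scores below are arbitrary example values:

# Illustrative only: the time-id lists assembled above, before tensor conversion.
original_size, crops_coords_top_left = (1024, 1024), (0, 0)
negative_original_size, negative_crops_coords_top_left = (1024, 1024), (0, 0)
aesthetic_score, negative_aesthetic_score = 6.0, 2.5

add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
add_neg_time_ids = list(negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,))

print(add_time_ids)      # [1024, 1024, 0, 0, 6.0]
print(add_neg_time_ids)  # [1024, 1024, 0, 0, 2.5]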
848
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
849
- def upcast_vae(self):
850
- dtype = self.vae.dtype
851
- self.vae.to(dtype=torch.float32)
852
- use_torch_2_0_or_xformers = isinstance(
853
- self.vae.decoder.mid_block.attentions[0].processor,
854
- (
855
- AttnProcessor2_0,
856
- XFormersAttnProcessor,
857
- LoRAXFormersAttnProcessor,
858
- LoRAAttnProcessor2_0,
859
- ),
860
- )
861
- # if xformers or torch_2_0 is used attention block does not need
862
- # to be in float32 which can save lots of memory
863
- if use_torch_2_0_or_xformers:
864
- self.vae.post_quant_conv.to(dtype)
865
- self.vae.decoder.conv_in.to(dtype)
866
- self.vae.decoder.mid_block.to(dtype)
867
-
868
- # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
869
- def get_guidance_scale_embedding(
870
- self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
871
- ) -> torch.Tensor:
872
- """
873
- See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
874
-
875
- Args:
876
- w (`torch.Tensor`):
877
- Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
878
- embedding_dim (`int`, *optional*, defaults to 512):
879
- Dimension of the embeddings to generate.
880
- dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
881
- Data type of the generated embeddings.
882
-
883
- Returns:
884
- `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
885
- """
886
- assert len(w.shape) == 1
887
- w = w * 1000.0
888
-
889
- half_dim = embedding_dim // 2
890
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
891
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
892
- emb = w.to(dtype)[:, None] * emb[None, :]
893
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
894
- if embedding_dim % 2 == 1: # zero pad
895
- emb = torch.nn.functional.pad(emb, (0, 1))
896
- assert emb.shape == (w.shape[0], embedding_dim)
897
- return emb
898
-
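A self-contained shape check of the sinusoidal guidance-scale embedding above; the guidance values and `embedding_dim` are arbitrary:

# Illustrative only: sinusoidal guidance-scale embedding, mirroring the method above.
import torch

w = torch.tensor([7.5, 5.0]) * 1000.0
embedding_dim = 256
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = w[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
print(emb.shape)  # torch.Size([2, 256])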
899
- @property
900
- def guidance_scale(self):
901
- return self._guidance_scale
902
-
903
- @property
904
- def guidance_rescale(self):
905
- return self._guidance_rescale
906
-
907
- @property
908
- def clip_skip(self):
909
- return self._clip_skip
910
-
911
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
912
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
913
- # corresponds to doing no classifier free guidance.
914
- @property
915
- def do_classifier_free_guidance(self):
916
- return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
917
-
918
- @property
919
- def cross_attention_kwargs(self):
920
- return self._cross_attention_kwargs
921
-
922
- @property
923
- def denoising_end(self):
924
- return self._denoising_end
925
-
926
- @property
927
- def denoising_start(self):
928
- return self._denoising_start
929
-
930
- @property
931
- def num_timesteps(self):
932
- return self._num_timesteps
933
-
934
- @property
935
- def interrupt(self):
936
- return self._interrupt
937
-
938
- @torch.no_grad()
939
- @replace_example_docstring(EXAMPLE_DOC_STRING)
940
- def __call__(
941
- self,
942
- prompt: Union[str, List[str]] = None,
943
- prompt_2: Optional[Union[str, List[str]]] = None,
944
- image: Union[
945
- torch.Tensor,
946
- PIL.Image.Image,
947
- np.ndarray,
948
- List[torch.Tensor],
949
- List[PIL.Image.Image],
950
- List[np.ndarray],
951
- ] = None,
952
- strength: float = 0.3,
953
- num_inference_steps: int = 50,
954
- timesteps: List[int] = None,
955
- denoising_start: Optional[float] = None,
956
- denoising_end: Optional[float] = None,
957
- guidance_scale: float = 5.0,
958
- negative_prompt: Optional[Union[str, List[str]]] = None,
959
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
960
- num_images_per_prompt: Optional[int] = 1,
961
- eta: float = 0.0,
962
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
963
- latents: Optional[torch.Tensor] = None,
964
- prompt_embeds: Optional[torch.Tensor] = None,
965
- negative_prompt_embeds: Optional[torch.Tensor] = None,
966
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
967
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
968
- ip_adapter_image: Optional[PipelineImageInput] = None,
969
- ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
970
- output_type: Optional[str] = "pil",
971
- return_dict: bool = True,
972
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
973
- guidance_rescale: float = 0.0,
974
- original_size: Tuple[int, int] = None,
975
- crops_coords_top_left: Tuple[int, int] = (0, 0),
976
- target_size: Tuple[int, int] = None,
977
- negative_original_size: Optional[Tuple[int, int]] = None,
978
- negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
979
- negative_target_size: Optional[Tuple[int, int]] = None,
980
- aesthetic_score: float = 6.0,
981
- negative_aesthetic_score: float = 2.5,
982
- clip_skip: Optional[int] = None,
983
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
984
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
985
- map: torch.Tensor = None,
986
- original_image: Union[
987
- torch.Tensor,
988
- PIL.Image.Image,
989
- np.ndarray,
990
- List[torch.Tensor],
991
- List[PIL.Image.Image],
992
- List[np.ndarray],
993
- ] = None,
994
- **kwargs,
995
- ):
996
- r"""
997
- Function invoked when calling the pipeline for generation.
998
-
999
- Args:
1000
- prompt (`str` or `List[str]`, *optional*):
1001
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
1002
- instead.
1003
- prompt_2 (`str` or `List[str]`, *optional*):
1004
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
1005
- used in both text-encoders
1006
- image (`torch.Tensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`):
1007
- The image(s) to modify with the pipeline.
1008
- strength (`float`, *optional*, defaults to 0.3):
1009
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
1010
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
1011
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
1012
- be maximum and the denoising process will run for the full number of iterations specified in
1013
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. Note that in the case of
1014
- `denoising_start` being passed, the value of `strength` will be ignored.
1015
- num_inference_steps (`int`, *optional*, defaults to 50):
1016
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1017
- expense of slower inference.
1018
- denoising_start (`float`, *optional*):
1019
- When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
1020
- bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
1021
- it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
1022
- strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
1023
- is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
1024
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1025
- denoising_end (`float`, *optional*):
1026
- When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
1027
- completed before it is intentionally prematurely terminated. As a result, the returned sample will
1028
- still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
1029
- denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
1030
- final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
1031
- forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1032
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1033
- guidance_scale (`float`, *optional*, defaults to 5.0):
1034
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1035
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
1036
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1037
- 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
1038
- usually at the expense of lower image quality.
1039
- negative_prompt (`str` or `List[str]`, *optional*):
1040
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
1041
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1042
- less than `1`).
1043
- negative_prompt_2 (`str` or `List[str]`, *optional*):
1044
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
1045
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
1046
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1047
- The number of images to generate per prompt.
1048
- eta (`float`, *optional*, defaults to 0.0):
1049
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1050
- [`schedulers.DDIMScheduler`], will be ignored for others.
1051
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1052
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1053
- to make generation deterministic.
1054
- latents (`torch.Tensor`, *optional*):
1055
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1056
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1057
- tensor will be generated by sampling using the supplied random `generator`.
1058
- prompt_embeds (`torch.Tensor`, *optional*):
1059
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1060
- provided, text embeddings will be generated from `prompt` input argument.
1061
- negative_prompt_embeds (`torch.Tensor`, *optional*):
1062
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1063
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1064
- argument.
1065
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
1066
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1067
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
1068
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
1069
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1070
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1071
- input argument.
1072
- ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1073
- ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
1074
- Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number of IP-Adapters.
1075
- Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
1076
- if `do_classifier_free_guidance` is set to `True`.
1077
- If not provided, embeddings are computed from the `ip_adapter_image` input argument.
1078
- output_type (`str`, *optional*, defaults to `"pil"`):
1079
- The output format of the generated image. Choose between
1080
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1081
- return_dict (`bool`, *optional*, defaults to `True`):
1082
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a
1083
- plain tuple.
1084
- callback (`Callable`, *optional*):
1085
- A function that will be called every `callback_steps` steps during inference. The function will be
1086
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
1087
- callback_steps (`int`, *optional*, defaults to 1):
1088
- The frequency at which the `callback` function will be called. If not specified, the callback will be
1089
- called at every step.
1090
- cross_attention_kwargs (`dict`, *optional*):
1091
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1092
- `self.processor` in
1093
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
1094
- guidance_rescale (`float`, *optional*, defaults to 0.0):
1095
- Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
1096
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
1097
- [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
1098
- Guidance rescale factor should fix overexposure when using zero terminal SNR.
1099
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1100
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1101
- `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
1102
- explained in section 2.2 of
1103
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1104
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1105
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1106
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1107
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1108
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1109
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1110
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
1111
- not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
1112
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1113
- aesthetic_score (`float`, *optional*, defaults to 6.0):
1114
- Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
1115
- Part of SDXL's micro-conditioning as explained in section 2.2 of
1116
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1117
- negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
1118
- Part of SDXL's micro-conditioning as explained in section 2.2 of
1119
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
1120
- simulate an aesthetic score of the generated image by influencing the negative text condition.
1121
-
1122
- Examples:
1123
-
1124
- Returns:
1125
- [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
1126
- [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
1127
- `tuple`. When returning a tuple, the first element is a list with the generated images.
1128
- """
1129
-
1130
- callback = kwargs.pop("callback", None)
1131
- callback_steps = kwargs.pop("callback_steps", None)
1132
-
1133
- if callback is not None:
1134
- deprecate(
1135
- "callback",
1136
- "1.0.0",
1137
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1138
- )
1139
- if callback_steps is not None:
1140
- deprecate(
1141
- "callback_steps",
1142
- "1.0.0",
1143
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1144
- )
1145
-
1146
- # 1. Check inputs. Raise error if not correct
1147
- self.check_inputs(
1148
- prompt,
1149
- prompt_2,
1150
- strength,
1151
- num_inference_steps,
1152
- callback_steps,
1153
- negative_prompt,
1154
- negative_prompt_2,
1155
- prompt_embeds,
1156
- negative_prompt_embeds,
1157
- ip_adapter_image,
1158
- ip_adapter_image_embeds,
1159
- callback_on_step_end_tensor_inputs,
1160
- )
1161
-
1162
- self._guidance_scale = guidance_scale
1163
- self._guidance_rescale = guidance_rescale
1164
- self._clip_skip = clip_skip
1165
- self._cross_attention_kwargs = cross_attention_kwargs
1166
- self._denoising_end = denoising_end
1167
- self._denoising_start = denoising_start
1168
- self._interrupt = False
1169
-
1170
- # 2. Define call parameters
1171
- if prompt is not None and isinstance(prompt, str):
1172
- batch_size = 1
1173
- elif prompt is not None and isinstance(prompt, list):
1174
- batch_size = len(prompt)
1175
- else:
1176
- batch_size = prompt_embeds.shape[0]
1177
-
1178
- device = self._execution_device
1179
-
1180
- # 3. Encode input prompt
1181
- text_encoder_lora_scale = (
1182
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
1183
- )
1184
- (
1185
- prompt_embeds,
1186
- negative_prompt_embeds,
1187
- pooled_prompt_embeds,
1188
- negative_pooled_prompt_embeds,
1189
- ) = self.encode_prompt(
1190
- prompt=prompt,
1191
- prompt_2=prompt_2,
1192
- device=device,
1193
- num_images_per_prompt=num_images_per_prompt,
1194
- do_classifier_free_guidance=self.do_classifier_free_guidance,
1195
- negative_prompt=negative_prompt,
1196
- negative_prompt_2=negative_prompt_2,
1197
- prompt_embeds=prompt_embeds,
1198
- negative_prompt_embeds=negative_prompt_embeds,
1199
- pooled_prompt_embeds=pooled_prompt_embeds,
1200
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1201
- lora_scale=text_encoder_lora_scale,
1202
- )
1203
-
1204
- # 4. Preprocess image
1205
- # image = self.image_processor.preprocess(image)  # ideally we would preprocess the image with diffusers here, but for this POC we won't --- it throws a deprecation warning
1206
- map = torchvision.transforms.Resize(
1207
- tuple(s // self.vae_scale_factor for s in original_image.shape[2:]), antialias=None
1208
- )(map)
1209
-
1210
- # 5. Prepare timesteps
1211
- def denoising_value_valid(dnv):
1212
- return isinstance(dnv, float) and 0 < dnv < 1
1213
-
1214
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
1215
-
1216
- # begin diff diff change
1217
- total_time_steps = num_inference_steps
1218
- # end diff diff change
1219
-
1220
- timesteps, num_inference_steps = self.get_timesteps(
1221
- num_inference_steps,
1222
- strength,
1223
- device,
1224
- denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
1225
- )
1226
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1227
-
1228
- add_noise = denoising_start is None
1229
- # 6. Prepare latent variables
1230
- latents = self.prepare_latents(
1231
- image,
1232
- latent_timestep,
1233
- batch_size,
1234
- num_images_per_prompt,
1235
- prompt_embeds.dtype,
1236
- device,
1237
- generator,
1238
- add_noise,
1239
- )
1240
- # 7. Prepare extra step kwargs.
1241
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1242
-
1243
- height, width = latents.shape[-2:]
1244
- height = height * self.vae_scale_factor
1245
- width = width * self.vae_scale_factor
1246
-
1247
- original_size = original_size or (height, width)
1248
- target_size = target_size or (height, width)
1249
-
1250
- # 8. Prepare added time ids & embeddings
1251
- if negative_original_size is None:
1252
- negative_original_size = original_size
1253
- if negative_target_size is None:
1254
- negative_target_size = target_size
1255
-
1256
- add_text_embeds = pooled_prompt_embeds
1257
- if self.text_encoder_2 is None:
1258
- text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1259
- else:
1260
- text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1261
-
1262
- add_time_ids, add_neg_time_ids = self._get_add_time_ids(
1263
- original_size,
1264
- crops_coords_top_left,
1265
- target_size,
1266
- aesthetic_score,
1267
- negative_aesthetic_score,
1268
- negative_original_size,
1269
- negative_crops_coords_top_left,
1270
- negative_target_size,
1271
- dtype=prompt_embeds.dtype,
1272
- text_encoder_projection_dim=text_encoder_projection_dim,
1273
- )
1274
- add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1275
-
1276
- if self.do_classifier_free_guidance:
1277
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1278
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1279
- add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1280
- add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
1281
-
1282
- prompt_embeds = prompt_embeds.to(device)
1283
- add_text_embeds = add_text_embeds.to(device)
1284
- add_time_ids = add_time_ids.to(device)
1285
-
1286
- if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1287
- image_embeds = self.prepare_ip_adapter_image_embeds(
1288
- ip_adapter_image,
1289
- ip_adapter_image_embeds,
1290
- device,
1291
- batch_size * num_images_per_prompt,
1292
- self.do_classifier_free_guidance,
1293
- )
1294
-
1295
- # 9. Denoising loop
1296
- num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1297
-
1298
- # 9.1 Apply denoising_end
1299
- if (
1300
- denoising_end is not None
1301
- and denoising_start is not None
1302
- and denoising_value_valid(denoising_end)
1303
- and denoising_value_valid(denoising_start)
1304
- and denoising_start >= denoising_end
1305
- ):
1306
- raise ValueError(
1307
- f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
1308
- + f" {denoising_end} when using type float."
1309
- )
1310
- elif denoising_end is not None and denoising_value_valid(denoising_end):
1311
- discrete_timestep_cutoff = int(
1312
- round(
1313
- self.scheduler.config.num_train_timesteps
1314
- - (denoising_end * self.scheduler.config.num_train_timesteps)
1315
- )
1316
- )
1317
- num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1318
- timesteps = timesteps[:num_inference_steps]
1319
-
1320
- # preparations for diff diff
1321
- original_with_noise = self.prepare_latents(
1322
- original_image, timesteps, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
1323
- )
1324
- thresholds = torch.arange(total_time_steps, dtype=map.dtype) / total_time_steps
1325
- thresholds = thresholds.unsqueeze(1).unsqueeze(1).to(device)
1326
- masks = map > (thresholds + (denoising_start or 0))
1327
- # end diff diff preparations
1328
-
1329
- # 9.2 Optionally get Guidance Scale Embedding
1330
- timestep_cond = None
1331
- if self.unet.config.time_cond_proj_dim is not None:
1332
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1333
- timestep_cond = self.get_guidance_scale_embedding(
1334
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1335
- ).to(device=device, dtype=latents.dtype)
1336
-
1337
- self._num_timesteps = len(timesteps)
1338
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1339
- for i, t in enumerate(timesteps):
1340
- if self.interrupt:
1341
- continue
1342
-
1343
- # diff diff
1344
- if i == 0 and denoising_start is None:
1345
- latents = original_with_noise[:1]
1346
- else:
1347
- mask = masks[i].unsqueeze(0)
1348
- # cast mask to the same type as latents etc
1349
- mask = mask.to(latents.dtype)
1350
- mask = mask.unsqueeze(1) # fit shape
1351
- latents = original_with_noise[i] * mask + latents * (1 - mask)
1352
- # end diff diff
1353
-
1354
- # expand the latents if we are doing classifier free guidance
1355
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1356
-
1357
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1358
-
1359
- # predict the noise residual
1360
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1361
- if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1362
- added_cond_kwargs["image_embeds"] = image_embeds
1363
- noise_pred = self.unet(
1364
- latent_model_input,
1365
- t,
1366
- encoder_hidden_states=prompt_embeds,
1367
- timestep_cond=timestep_cond,
1368
- cross_attention_kwargs=cross_attention_kwargs,
1369
- added_cond_kwargs=added_cond_kwargs,
1370
- return_dict=False,
1371
- )[0]
1372
-
1373
- # perform guidance
1374
- if self.do_classifier_free_guidance:
1375
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1376
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1377
-
1378
- if self.do_classifier_free_guidance and guidance_rescale > 0.0:
1379
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1380
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1381
-
1382
- # compute the previous noisy sample x_t -> x_t-1
1383
- latents_dtype = latents.dtype
1384
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1385
- if latents.dtype != latents_dtype:
1386
- if torch.backends.mps.is_available():
1387
- # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
1388
- latents = latents.to(latents_dtype)
1389
- else:
1390
- raise ValueError(
1391
- "For the given accelerator, there seems to be an unexpected problem in type-casting. Please file an issue on the PyTorch GitHub repository. See also: https://github.com/huggingface/diffusers/pull/7446/."
1392
- )
1393
-
1394
- if callback_on_step_end is not None:
1395
- callback_kwargs = {}
1396
- for k in callback_on_step_end_tensor_inputs:
1397
- callback_kwargs[k] = locals()[k]
1398
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1399
-
1400
- latents = callback_outputs.pop("latents", latents)
1401
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1402
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1403
- add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
1404
- negative_pooled_prompt_embeds = callback_outputs.pop(
1405
- "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
1406
- )
1407
- add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
1408
- add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids)
1409
-
1410
- # call the callback, if provided
1411
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1412
- progress_bar.update()
1413
- if callback is not None and i % callback_steps == 0:
1414
- step_idx = i // getattr(self.scheduler, "order", 1)
1415
- callback(step_idx, t, latents)
1416
-
1417
- if XLA_AVAILABLE:
1418
- xm.mark_step()
1419
-
1420
- if not output_type == "latent":
1421
- # make sure the VAE is in float32 mode, as it overflows in float16
1422
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1423
-
1424
- if needs_upcasting:
1425
- self.upcast_vae()
1426
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1427
- elif latents.dtype != self.vae.dtype:
1428
- if torch.backends.mps.is_available():
1429
- # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
1430
- self.vae = self.vae.to(latents.dtype)
1431
- else:
1432
- raise ValueError(
1433
- "For the given accelerator, there seems to be an unexpected problem in type-casting. Please file an issue on the PyTorch GitHub repository. See also: https://github.com/huggingface/diffusers/pull/7446/."
1434
- )
1435
- # unscale/denormalize the latents
1436
- # denormalize with the mean and std if available and not None
1437
- has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
1438
- has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
1439
- if has_latents_mean and has_latents_std:
1440
- latents_mean = (
1441
- torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
1442
- )
1443
- latents_std = (
1444
- torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
1445
- )
1446
- latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
1447
- else:
1448
- latents = latents / self.vae.config.scaling_factor
1449
-
1450
- image = self.vae.decode(latents, return_dict=False)[0]
1451
-
1452
- # cast back to fp16 if needed
1453
- if needs_upcasting:
1454
- self.vae.to(dtype=torch.float16)
1455
- else:
1456
- image = latents
1457
-
1458
- # apply watermark if available
1459
- if self.watermark is not None:
1460
- image = self.watermark.apply_watermark(image)
1461
-
1462
- image = self.image_processor.postprocess(image, output_type=output_type)
1463
-
1464
- # Offload all models
1465
- self.maybe_free_model_hooks()
1466
-
1467
- if not return_dict:
1468
- return (image,)
1469
-
1470
- return StableDiffusionXLPipelineOutput(images=image)
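For context, a hedged usage sketch of the differential img2img pipeline defined above. The checkpoint id, the custom-pipeline name, the image URL and the preprocessing are assumptions, not part of this diff; `map` is a per-pixel tensor in [0, 1] where, per the masking logic above, larger values keep a region tied to the noised original for more denoising steps.

# Hypothetical usage sketch (assumed names/URLs); not taken from this repository.
import torch
from torchvision import transforms
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # assumed SDXL base checkpoint
    custom_pipeline="pipeline_stable_diffusion_xl_differential_img2img",  # assumed custom-pipeline name
    torch_dtype=torch.float16,
).to("cuda")

pil_image = load_image("https://example.com/input.png").resize((1024, 1024))  # placeholder URL
image = transforms.ToTensor()(pil_image).unsqueeze(0) * 2.0 - 1.0  # (1, 3, H, W) in [-1, 1], assumed preprocessing
change_map = torch.full((1, 1024, 1024), 0.5)  # assumed (1, H, W) mask of per-pixel values in [0, 1]

result = pipe(
    prompt="a photo of a castle at sunset",
    image=image,
    original_image=image,
    map=change_map,
    strength=1.0,
    num_inference_steps=25,
).images[0]
result.save("out.png")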
 
pipeline_stable_diffusion_xl_instandid_img2img.py DELETED
@@ -1,1077 +0,0 @@
1
- # Copyright 2024 The InstantX Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- import math
17
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
-
19
- import cv2
20
- import numpy as np
21
- import PIL.Image
22
- import torch
23
- import torch.nn as nn
24
-
25
- from diffusers import StableDiffusionXLControlNetImg2ImgPipeline
26
- from diffusers.image_processor import PipelineImageInput
27
- from diffusers.models import ControlNetModel
28
- from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
29
- from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
30
- from diffusers.utils import (
31
- deprecate,
32
- logging,
33
- replace_example_docstring,
34
- )
35
- from diffusers.utils.import_utils import is_xformers_available
36
- from diffusers.utils.torch_utils import is_compiled_module, is_torch_version
37
-
38
-
39
- try:
40
- import xformers
41
- import xformers.ops
42
-
43
- xformers_available = True
44
- except Exception:
45
- xformers_available = False
46
-
47
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
48
-
49
- logger.warning(
50
- "To use instant id pipelines, please make sure you have the `insightface` library installed: `pip install insightface`."
51
- "Please refer to: https://huggingface.co/InstantX/InstantID for further instructions regarding inference"
52
- )
53
-
54
-
55
- def FeedForward(dim, mult=4):
56
- inner_dim = int(dim * mult)
57
- return nn.Sequential(
58
- nn.LayerNorm(dim),
59
- nn.Linear(dim, inner_dim, bias=False),
60
- nn.GELU(),
61
- nn.Linear(inner_dim, dim, bias=False),
62
- )
63
-
64
-
65
- def reshape_tensor(x, heads):
66
- bs, length, width = x.shape
67
- # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
68
- x = x.view(bs, length, heads, -1)
69
- # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
70
- x = x.transpose(1, 2)
71
- # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head)
72
- x = x.reshape(bs, heads, length, -1)
73
- return x
74
-
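A quick shape check of `reshape_tensor` above; the batch, length and width values are arbitrary:

# Illustrative only: the head split/transpose performed by reshape_tensor above.
import torch

x = torch.randn(2, 16, 512)   # (bs, length, width)
heads = 8
out = x.view(2, 16, heads, -1).transpose(1, 2).reshape(2, heads, 16, -1)
print(out.shape)              # torch.Size([2, 8, 16, 64])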
75
-
76
- class PerceiverAttention(nn.Module):
77
- def __init__(self, *, dim, dim_head=64, heads=8):
78
- super().__init__()
79
- self.scale = dim_head**-0.5
80
- self.dim_head = dim_head
81
- self.heads = heads
82
- inner_dim = dim_head * heads
83
-
84
- self.norm1 = nn.LayerNorm(dim)
85
- self.norm2 = nn.LayerNorm(dim)
86
-
87
- self.to_q = nn.Linear(dim, inner_dim, bias=False)
88
- self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
89
- self.to_out = nn.Linear(inner_dim, dim, bias=False)
90
-
91
- def forward(self, x, latents):
92
- """
93
- Args:
94
- x (torch.Tensor): image features
95
- shape (b, n1, D)
96
- latent (torch.Tensor): latent features
97
- shape (b, n2, D)
98
- """
99
- x = self.norm1(x)
100
- latents = self.norm2(latents)
101
-
102
- b, l, _ = latents.shape
103
-
104
- q = self.to_q(latents)
105
- kv_input = torch.cat((x, latents), dim=-2)
106
- k, v = self.to_kv(kv_input).chunk(2, dim=-1)
107
-
108
- q = reshape_tensor(q, self.heads)
109
- k = reshape_tensor(k, self.heads)
110
- v = reshape_tensor(v, self.heads)
111
-
112
- # attention
113
- scale = 1 / math.sqrt(math.sqrt(self.dim_head))
114
- weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
115
- weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
116
- out = weight @ v
117
-
118
- out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
119
-
120
- return self.to_out(out)
121
-
122
-
123
- class Resampler(nn.Module):
124
- def __init__(
125
- self,
126
- dim=1024,
127
- depth=8,
128
- dim_head=64,
129
- heads=16,
130
- num_queries=8,
131
- embedding_dim=768,
132
- output_dim=1024,
133
- ff_mult=4,
134
- ):
135
- super().__init__()
136
-
137
- self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
138
-
139
- self.proj_in = nn.Linear(embedding_dim, dim)
140
-
141
- self.proj_out = nn.Linear(dim, output_dim)
142
- self.norm_out = nn.LayerNorm(output_dim)
143
-
144
- self.layers = nn.ModuleList([])
145
- for _ in range(depth):
146
- self.layers.append(
147
- nn.ModuleList(
148
- [
149
- PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
150
- FeedForward(dim=dim, mult=ff_mult),
151
- ]
152
- )
153
- )
154
-
155
- def forward(self, x):
156
- latents = self.latents.repeat(x.size(0), 1, 1)
157
- x = self.proj_in(x)
158
-
159
- for attn, ff in self.layers:
160
- latents = attn(x, latents) + latents
161
- latents = ff(latents) + latents
162
-
163
- latents = self.proj_out(latents)
164
- return self.norm_out(latents)
165
-
166
-
167
- class AttnProcessor(nn.Module):
168
- r"""
169
- Default processor for performing attention-related computations.
170
- """
171
-
172
- def __init__(
173
- self,
174
- hidden_size=None,
175
- cross_attention_dim=None,
176
- ):
177
- super().__init__()
178
-
179
- def __call__(
180
- self,
181
- attn,
182
- hidden_states,
183
- encoder_hidden_states=None,
184
- attention_mask=None,
185
- temb=None,
186
- ):
187
- residual = hidden_states
188
-
189
- if attn.spatial_norm is not None:
190
- hidden_states = attn.spatial_norm(hidden_states, temb)
191
-
192
- input_ndim = hidden_states.ndim
193
-
194
- if input_ndim == 4:
195
- batch_size, channel, height, width = hidden_states.shape
196
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
197
-
198
- batch_size, sequence_length, _ = (
199
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
200
- )
201
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
202
-
203
- if attn.group_norm is not None:
204
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
205
-
206
- query = attn.to_q(hidden_states)
207
-
208
- if encoder_hidden_states is None:
209
- encoder_hidden_states = hidden_states
210
- elif attn.norm_cross:
211
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
212
-
213
- key = attn.to_k(encoder_hidden_states)
214
- value = attn.to_v(encoder_hidden_states)
215
-
216
- query = attn.head_to_batch_dim(query)
217
- key = attn.head_to_batch_dim(key)
218
- value = attn.head_to_batch_dim(value)
219
-
220
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
221
- hidden_states = torch.bmm(attention_probs, value)
222
- hidden_states = attn.batch_to_head_dim(hidden_states)
223
-
224
- # linear proj
225
- hidden_states = attn.to_out[0](hidden_states)
226
- # dropout
227
- hidden_states = attn.to_out[1](hidden_states)
228
-
229
- if input_ndim == 4:
230
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
231
-
232
- if attn.residual_connection:
233
- hidden_states = hidden_states + residual
234
-
235
- hidden_states = hidden_states / attn.rescale_output_factor
236
-
237
- return hidden_states
238
-
239
-
240
- class IPAttnProcessor(nn.Module):
241
- r"""
242
- Attention processor for IP-Adapter.
243
- Args:
244
- hidden_size (`int`):
245
- The hidden size of the attention layer.
246
- cross_attention_dim (`int`):
247
- The number of channels in the `encoder_hidden_states`.
248
- scale (`float`, defaults to 1.0):
249
- the weight scale of image prompt.
250
- num_tokens (`int`, defaults to 4; should be 16 when using ip_adapter_plus):
251
- The context length of the image features.
252
- """
253
-
254
- def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4):
255
- super().__init__()
256
-
257
- self.hidden_size = hidden_size
258
- self.cross_attention_dim = cross_attention_dim
259
- self.scale = scale
260
- self.num_tokens = num_tokens
261
-
262
- self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
263
- self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
264
-
265
- def __call__(
266
- self,
267
- attn,
268
- hidden_states,
269
- encoder_hidden_states=None,
270
- attention_mask=None,
271
- temb=None,
272
- ):
273
- residual = hidden_states
274
-
275
- if attn.spatial_norm is not None:
276
- hidden_states = attn.spatial_norm(hidden_states, temb)
277
-
278
- input_ndim = hidden_states.ndim
279
-
280
- if input_ndim == 4:
281
- batch_size, channel, height, width = hidden_states.shape
282
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
283
-
284
- batch_size, sequence_length, _ = (
285
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
286
- )
287
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
288
-
289
- if attn.group_norm is not None:
290
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
291
-
292
- query = attn.to_q(hidden_states)
293
-
294
- if encoder_hidden_states is None:
295
- encoder_hidden_states = hidden_states
296
- else:
297
- # get encoder_hidden_states, ip_hidden_states
298
- end_pos = encoder_hidden_states.shape[1] - self.num_tokens
299
- encoder_hidden_states, ip_hidden_states = (
300
- encoder_hidden_states[:, :end_pos, :],
301
- encoder_hidden_states[:, end_pos:, :],
302
- )
303
- if attn.norm_cross:
304
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
305
-
306
- key = attn.to_k(encoder_hidden_states)
307
- value = attn.to_v(encoder_hidden_states)
308
-
309
- query = attn.head_to_batch_dim(query)
310
- key = attn.head_to_batch_dim(key)
311
- value = attn.head_to_batch_dim(value)
312
-
313
- if xformers_available:
314
- hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
315
- else:
316
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
317
- hidden_states = torch.bmm(attention_probs, value)
318
- hidden_states = attn.batch_to_head_dim(hidden_states)
319
-
320
- # for ip-adapter
321
- ip_key = self.to_k_ip(ip_hidden_states)
322
- ip_value = self.to_v_ip(ip_hidden_states)
323
-
324
- ip_key = attn.head_to_batch_dim(ip_key)
325
- ip_value = attn.head_to_batch_dim(ip_value)
326
-
327
- if xformers_available:
328
- ip_hidden_states = self._memory_efficient_attention_xformers(query, ip_key, ip_value, None)
329
- else:
330
- ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
331
- ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
332
- ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)
333
-
334
- hidden_states = hidden_states + self.scale * ip_hidden_states
335
-
336
- # linear proj
337
- hidden_states = attn.to_out[0](hidden_states)
338
- # dropout
339
- hidden_states = attn.to_out[1](hidden_states)
340
-
341
- if input_ndim == 4:
342
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
343
-
344
- if attn.residual_connection:
345
- hidden_states = hidden_states + residual
346
-
347
- hidden_states = hidden_states / attn.rescale_output_factor
348
-
349
- return hidden_states
350
-
351
- def _memory_efficient_attention_xformers(self, query, key, value, attention_mask):
352
- # TODO attention_mask
353
- query = query.contiguous()
354
- key = key.contiguous()
355
- value = value.contiguous()
356
- hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask)
357
- return hidden_states
358
-
359
-
360
- EXAMPLE_DOC_STRING = """
361
- Examples:
362
- ```py
363
- >>> # !pip install opencv-python transformers accelerate insightface
364
- >>> import diffusers
365
- >>> from diffusers.utils import load_image
366
- >>> from diffusers.models import ControlNetModel
367
-
368
- >>> import cv2
369
- >>> import torch
370
- >>> import numpy as np
371
- >>> from PIL import Image
372
-
373
- >>> from insightface.app import FaceAnalysis
374
- >>> from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps
375
-
376
- >>> # download 'antelopev2' under ./models
377
- >>> app = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
378
- >>> app.prepare(ctx_id=0, det_size=(640, 640))
379
-
380
- >>> # download models under ./checkpoints
381
- >>> face_adapter = f'./checkpoints/ip-adapter.bin'
382
- >>> controlnet_path = f'./checkpoints/ControlNetModel'
383
-
384
- >>> # load IdentityNet
385
- >>> controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
386
-
387
- >>> pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
388
- ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
389
- ... )
390
- >>> pipe.cuda()
391
-
392
- >>> # load adapter
393
- >>> pipe.load_ip_adapter_instantid(face_adapter)
394
-
395
- >>> prompt = "analog film photo of a man. faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage, masterpiece, best quality"
396
- >>> negative_prompt = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured (lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch,deformed, mutated, cross-eyed, ugly, disfigured"
397
-
398
- >>> # load an image
399
- >>> face_image = load_image("your-example.jpg")
400
-
401
- >>> face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))[-1]
402
- >>> face_emb = face_info['embedding']
403
- >>> face_kps = draw_kps(face_image, face_info['kps'])
404
-
405
- >>> pipe.set_ip_adapter_scale(0.8)
406
-
407
- >>> # generate image
408
- >>> image = pipe(
409
- ... prompt, image_embeds=face_emb, image=face_kps, controlnet_conditioning_scale=0.8
410
- ... ).images[0]
411
- ```
412
- """
413
-
414
-
415
- def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]):
416
- stickwidth = 4
417
- limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
418
- kps = np.array(kps)
419
-
420
- w, h = image_pil.size
421
- out_img = np.zeros([h, w, 3])
422
-
423
- for i in range(len(limbSeq)):
424
- index = limbSeq[i]
425
- color = color_list[index[0]]
426
-
427
- x = kps[index][:, 0]
428
- y = kps[index][:, 1]
429
- length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
430
- angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
431
- polygon = cv2.ellipse2Poly(
432
- (int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1
433
- )
434
- out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
435
- out_img = (out_img * 0.6).astype(np.uint8)
436
-
437
- for idx_kp, kp in enumerate(kps):
438
- color = color_list[idx_kp]
439
- x, y = kp
440
- out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
441
-
442
- out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8))
443
- return out_img_pil
444
-
445
-
446
- class StableDiffusionXLInstantIDImg2ImgPipeline(StableDiffusionXLControlNetImg2ImgPipeline):
447
- def cuda(self, dtype=torch.float16, use_xformers=False):
448
- self.to("cuda", dtype)
449
-
450
- if hasattr(self, "image_proj_model"):
451
- self.image_proj_model.to(self.unet.device).to(self.unet.dtype)
452
-
453
- if use_xformers:
454
- if is_xformers_available():
455
- import xformers
456
- from packaging import version
457
-
458
- xformers_version = version.parse(xformers.__version__)
459
- if xformers_version == version.parse("0.0.16"):
460
- logger.warning(
461
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
462
- )
463
- self.enable_xformers_memory_efficient_attention()
464
- else:
465
- raise ValueError("xformers is not available. Make sure it is installed correctly")
466
-
467
- def load_ip_adapter_instantid(self, model_ckpt, image_emb_dim=512, num_tokens=16, scale=0.5):
468
- self.set_image_proj_model(model_ckpt, image_emb_dim, num_tokens)
469
- self.set_ip_adapter(model_ckpt, num_tokens, scale)
470
-
471
- def set_image_proj_model(self, model_ckpt, image_emb_dim=512, num_tokens=16):
472
- image_proj_model = Resampler(
473
- dim=1280,
474
- depth=4,
475
- dim_head=64,
476
- heads=20,
477
- num_queries=num_tokens,
478
- embedding_dim=image_emb_dim,
479
- output_dim=self.unet.config.cross_attention_dim,
480
- ff_mult=4,
481
- )
482
-
483
- image_proj_model.eval()
484
-
485
- self.image_proj_model = image_proj_model.to(self.device, dtype=self.dtype)
486
- state_dict = torch.load(model_ckpt, map_location="cpu")
487
- if "image_proj" in state_dict:
488
- state_dict = state_dict["image_proj"]
489
- self.image_proj_model.load_state_dict(state_dict)
490
-
491
- self.image_proj_model_in_features = image_emb_dim
492
-
493
- def set_ip_adapter(self, model_ckpt, num_tokens, scale):
494
- unet = self.unet
495
- attn_procs = {}
496
- for name in unet.attn_processors.keys():
497
- cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
498
- if name.startswith("mid_block"):
499
- hidden_size = unet.config.block_out_channels[-1]
500
- elif name.startswith("up_blocks"):
501
- block_id = int(name[len("up_blocks.")])
502
- hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
503
- elif name.startswith("down_blocks"):
504
- block_id = int(name[len("down_blocks.")])
505
- hidden_size = unet.config.block_out_channels[block_id]
506
- if cross_attention_dim is None:
507
- attn_procs[name] = AttnProcessor().to(unet.device, dtype=unet.dtype)
508
- else:
509
- attn_procs[name] = IPAttnProcessor(
510
- hidden_size=hidden_size,
511
- cross_attention_dim=cross_attention_dim,
512
- scale=scale,
513
- num_tokens=num_tokens,
514
- ).to(unet.device, dtype=unet.dtype)
515
- unet.set_attn_processor(attn_procs)
516
-
517
- state_dict = torch.load(model_ckpt, map_location="cpu")
518
- ip_layers = torch.nn.ModuleList(self.unet.attn_processors.values())
519
- if "ip_adapter" in state_dict:
520
- state_dict = state_dict["ip_adapter"]
521
- ip_layers.load_state_dict(state_dict)
522
-
523
- def set_ip_adapter_scale(self, scale):
524
- unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
525
- for attn_processor in unet.attn_processors.values():
526
- if isinstance(attn_processor, IPAttnProcessor):
527
- attn_processor.scale = scale
528
-
529
- def _encode_prompt_image_emb(self, prompt_image_emb, device, dtype, do_classifier_free_guidance):
530
- if isinstance(prompt_image_emb, torch.Tensor):
531
- prompt_image_emb = prompt_image_emb.clone().detach()
532
- else:
533
- prompt_image_emb = torch.tensor(prompt_image_emb)
534
-
535
- prompt_image_emb = prompt_image_emb.to(device=device, dtype=dtype)
536
- prompt_image_emb = prompt_image_emb.reshape([1, -1, self.image_proj_model_in_features])
537
-
538
- if do_classifier_free_guidance:
539
- prompt_image_emb = torch.cat([torch.zeros_like(prompt_image_emb), prompt_image_emb], dim=0)
540
- else:
541
- prompt_image_emb = torch.cat([prompt_image_emb], dim=0)
542
-
543
- prompt_image_emb = self.image_proj_model(prompt_image_emb)
544
- return prompt_image_emb
545
-
546
- @torch.no_grad()
547
- @replace_example_docstring(EXAMPLE_DOC_STRING)
548
- def __call__(
549
- self,
550
- prompt: Union[str, List[str]] = None,
551
- prompt_2: Optional[Union[str, List[str]]] = None,
552
- image: PipelineImageInput = None,
553
- control_image: PipelineImageInput = None,
554
- strength: float = 0.8,
555
- height: Optional[int] = None,
556
- width: Optional[int] = None,
557
- num_inference_steps: int = 50,
558
- guidance_scale: float = 5.0,
559
- negative_prompt: Optional[Union[str, List[str]]] = None,
560
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
561
- num_images_per_prompt: Optional[int] = 1,
562
- eta: float = 0.0,
563
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
564
- latents: Optional[torch.Tensor] = None,
565
- prompt_embeds: Optional[torch.Tensor] = None,
566
- negative_prompt_embeds: Optional[torch.Tensor] = None,
567
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
568
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
569
- image_embeds: Optional[torch.Tensor] = None,
570
- output_type: Optional[str] = "pil",
571
- return_dict: bool = True,
572
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
573
- controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
574
- guess_mode: bool = False,
575
- control_guidance_start: Union[float, List[float]] = 0.0,
576
- control_guidance_end: Union[float, List[float]] = 1.0,
577
- original_size: Tuple[int, int] = None,
578
- crops_coords_top_left: Tuple[int, int] = (0, 0),
579
- target_size: Tuple[int, int] = None,
580
- negative_original_size: Optional[Tuple[int, int]] = None,
581
- negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
582
- negative_target_size: Optional[Tuple[int, int]] = None,
583
- aesthetic_score: float = 6.0,
584
- negative_aesthetic_score: float = 2.5,
585
- clip_skip: Optional[int] = None,
586
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
587
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
588
- **kwargs,
589
- ):
590
- r"""
591
- The call function to the pipeline for generation.
592
-
593
- Args:
594
- prompt (`str` or `List[str]`, *optional*):
595
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
596
- prompt_2 (`str` or `List[str]`, *optional*):
597
- The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
598
- used in both text-encoders.
599
- image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
600
- `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
601
- The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
602
- specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
603
- accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
604
- and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
605
- `init`, images must be passed as a list such that each element of the list can be correctly batched for
606
- input to a single ControlNet.
607
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
608
- The height in pixels of the generated image. Anything below 512 pixels won't work well for
609
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
610
- and checkpoints that are not specifically fine-tuned on low resolutions.
611
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
612
- The width in pixels of the generated image. Anything below 512 pixels won't work well for
613
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
614
- and checkpoints that are not specifically fine-tuned on low resolutions.
615
- num_inference_steps (`int`, *optional*, defaults to 50):
616
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
617
- expense of slower inference.
618
- guidance_scale (`float`, *optional*, defaults to 5.0):
619
- A higher guidance scale value encourages the model to generate images closely linked to the text
620
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
621
- negative_prompt (`str` or `List[str]`, *optional*):
622
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
623
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
624
- negative_prompt_2 (`str` or `List[str]`, *optional*):
625
- The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
626
- and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
627
- num_images_per_prompt (`int`, *optional*, defaults to 1):
628
- The number of images to generate per prompt.
629
- eta (`float`, *optional*, defaults to 0.0):
630
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
631
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
632
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
633
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
634
- generation deterministic.
635
- latents (`torch.Tensor`, *optional*):
636
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
637
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
638
- tensor is generated by sampling using the supplied random `generator`.
639
- prompt_embeds (`torch.Tensor`, *optional*):
640
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
641
- provided, text embeddings are generated from the `prompt` input argument.
642
- negative_prompt_embeds (`torch.Tensor`, *optional*):
643
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
644
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
645
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
646
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
647
- not provided, pooled text embeddings are generated from `prompt` input argument.
648
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
649
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
650
- weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
651
- argument.
652
- image_embeds (`torch.Tensor`, *optional*):
653
- Pre-generated image embeddings.
654
- output_type (`str`, *optional*, defaults to `"pil"`):
655
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
656
- return_dict (`bool`, *optional*, defaults to `True`):
657
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
658
- plain tuple.
659
- cross_attention_kwargs (`dict`, *optional*):
660
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
661
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
662
- controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
663
- The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
664
- to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
665
- the corresponding scale as a list.
666
- guess_mode (`bool`, *optional*, defaults to `False`):
667
- The ControlNet encoder tries to recognize the content of the input image even if you remove all
668
- prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
669
- control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
670
- The percentage of total steps at which the ControlNet starts applying.
671
- control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
672
- The percentage of total steps at which the ControlNet stops applying.
673
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
674
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
675
- `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
676
- explained in section 2.2 of
677
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
678
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
679
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
680
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
681
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
682
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
683
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
684
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
685
- not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
686
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
687
- negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
688
- To negatively condition the generation process based on a specific image resolution. Part of SDXL's
689
- micro-conditioning as explained in section 2.2 of
690
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
691
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
692
- negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
693
- To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
694
- micro-conditioning as explained in section 2.2 of
695
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
696
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
697
- negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
698
- To negatively condition the generation process based on a target image resolution. It should be the same
699
- as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
700
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
701
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
702
- clip_skip (`int`, *optional*):
703
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
704
- the output of the pre-final layer will be used for computing the prompt embeddings.
705
- callback_on_step_end (`Callable`, *optional*):
706
- A function that is called at the end of each denoising step during inference. The function is called
707
- with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
708
- callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
709
- `callback_on_step_end_tensor_inputs`.
710
- callback_on_step_end_tensor_inputs (`List`, *optional*):
711
- The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
712
- will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
713
- `._callback_tensor_inputs` attribute of your pipeline class.
714
-
715
- Examples:
716
-
717
- Returns:
718
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
719
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
720
- otherwise a `tuple` is returned containing the output images.
721
- """
722
-
723
- callback = kwargs.pop("callback", None)
724
- callback_steps = kwargs.pop("callback_steps", None)
725
-
726
- if callback is not None:
727
- deprecate(
728
- "callback",
729
- "1.0.0",
730
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
731
- )
732
- if callback_steps is not None:
733
- deprecate(
734
- "callback_steps",
735
- "1.0.0",
736
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
737
- )
738
-
739
- controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
740
-
741
- # align format for control guidance
742
- if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
743
- control_guidance_start = len(control_guidance_end) * [control_guidance_start]
744
- elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
745
- control_guidance_end = len(control_guidance_start) * [control_guidance_end]
746
- elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
747
- mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
748
- control_guidance_start, control_guidance_end = (
749
- mult * [control_guidance_start],
750
- mult * [control_guidance_end],
751
- )
752
-
753
- # 1. Check inputs. Raise error if not correct
754
- self.check_inputs(
755
- prompt,
756
- prompt_2,
757
- control_image,
758
- strength,
759
- num_inference_steps,
760
- callback_steps,
761
- negative_prompt,
762
- negative_prompt_2,
763
- prompt_embeds,
764
- negative_prompt_embeds,
765
- pooled_prompt_embeds,
766
- negative_pooled_prompt_embeds,
767
- None,
768
- None,
769
- controlnet_conditioning_scale,
770
- control_guidance_start,
771
- control_guidance_end,
772
- callback_on_step_end_tensor_inputs,
773
- )
774
-
775
- self._guidance_scale = guidance_scale
776
- self._clip_skip = clip_skip
777
- self._cross_attention_kwargs = cross_attention_kwargs
778
-
779
- # 2. Define call parameters
780
- if prompt is not None and isinstance(prompt, str):
781
- batch_size = 1
782
- elif prompt is not None and isinstance(prompt, list):
783
- batch_size = len(prompt)
784
- else:
785
- batch_size = prompt_embeds.shape[0]
786
-
787
- device = self._execution_device
788
-
789
- if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
790
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
791
-
792
- global_pool_conditions = (
793
- controlnet.config.global_pool_conditions
794
- if isinstance(controlnet, ControlNetModel)
795
- else controlnet.nets[0].config.global_pool_conditions
796
- )
797
- guess_mode = guess_mode or global_pool_conditions
798
-
799
- # 3.1 Encode input prompt
800
- text_encoder_lora_scale = (
801
- self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
802
- )
803
- (
804
- prompt_embeds,
805
- negative_prompt_embeds,
806
- pooled_prompt_embeds,
807
- negative_pooled_prompt_embeds,
808
- ) = self.encode_prompt(
809
- prompt,
810
- prompt_2,
811
- device,
812
- num_images_per_prompt,
813
- self.do_classifier_free_guidance,
814
- negative_prompt,
815
- negative_prompt_2,
816
- prompt_embeds=prompt_embeds,
817
- negative_prompt_embeds=negative_prompt_embeds,
818
- pooled_prompt_embeds=pooled_prompt_embeds,
819
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
820
- lora_scale=text_encoder_lora_scale,
821
- clip_skip=self.clip_skip,
822
- )
823
-
824
- # 3.2 Encode image prompt
825
- prompt_image_emb = self._encode_prompt_image_emb(
826
- image_embeds, device, self.unet.dtype, self.do_classifier_free_guidance
827
- )
828
- bs_embed, seq_len, _ = prompt_image_emb.shape
829
- prompt_image_emb = prompt_image_emb.repeat(1, num_images_per_prompt, 1)
830
- prompt_image_emb = prompt_image_emb.view(bs_embed * num_images_per_prompt, seq_len, -1)
831
-
832
- # 4. Prepare image and controlnet_conditioning_image
833
- image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
834
-
835
- if isinstance(controlnet, ControlNetModel):
836
- control_image = self.prepare_control_image(
837
- image=control_image,
838
- width=width,
839
- height=height,
840
- batch_size=batch_size * num_images_per_prompt,
841
- num_images_per_prompt=num_images_per_prompt,
842
- device=device,
843
- dtype=controlnet.dtype,
844
- do_classifier_free_guidance=self.do_classifier_free_guidance,
845
- guess_mode=guess_mode,
846
- )
847
- height, width = control_image.shape[-2:]
848
- elif isinstance(controlnet, MultiControlNetModel):
849
- control_images = []
850
-
851
- for control_image_ in control_image:
852
- control_image_ = self.prepare_control_image(
853
- image=control_image_,
854
- width=width,
855
- height=height,
856
- batch_size=batch_size * num_images_per_prompt,
857
- num_images_per_prompt=num_images_per_prompt,
858
- device=device,
859
- dtype=controlnet.dtype,
860
- do_classifier_free_guidance=self.do_classifier_free_guidance,
861
- guess_mode=guess_mode,
862
- )
863
-
864
- control_images.append(control_image_)
865
-
866
- control_image = control_images
867
- height, width = control_image[0].shape[-2:]
868
- else:
869
- assert False
870
-
871
- # 5. Prepare timesteps
872
- self.scheduler.set_timesteps(num_inference_steps, device=device)
873
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
874
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
875
- self._num_timesteps = len(timesteps)
876
-
877
- # 6. Prepare latent variables
878
- latents = self.prepare_latents(
879
- image,
880
- latent_timestep,
881
- batch_size,
882
- num_images_per_prompt,
883
- prompt_embeds.dtype,
884
- device,
885
- generator,
886
- True,
887
- )
888
-
889
- # # 6.5 Optionally get Guidance Scale Embedding
890
- timestep_cond = None
891
- if self.unet.config.time_cond_proj_dim is not None:
892
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
893
- timestep_cond = self.get_guidance_scale_embedding(
894
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
895
- ).to(device=device, dtype=latents.dtype)
896
-
897
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
898
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
899
-
900
- # 7.1 Create tensor stating which controlnets to keep
901
- controlnet_keep = []
902
- for i in range(len(timesteps)):
903
- keeps = [
904
- 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
905
- for s, e in zip(control_guidance_start, control_guidance_end)
906
- ]
907
- controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
908
-
909
- # 7.2 Prepare added time ids & embeddings
910
- if isinstance(control_image, list):
911
- original_size = original_size or control_image[0].shape[-2:]
912
- else:
913
- original_size = original_size or control_image.shape[-2:]
914
- target_size = target_size or (height, width)
915
-
916
- if negative_original_size is None:
917
- negative_original_size = original_size
918
- if negative_target_size is None:
919
- negative_target_size = target_size
920
- add_text_embeds = pooled_prompt_embeds
921
-
922
- if self.text_encoder_2 is None:
923
- text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
924
- else:
925
- text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
926
-
927
- add_time_ids, add_neg_time_ids = self._get_add_time_ids(
928
- original_size,
929
- crops_coords_top_left,
930
- target_size,
931
- aesthetic_score,
932
- negative_aesthetic_score,
933
- negative_original_size,
934
- negative_crops_coords_top_left,
935
- negative_target_size,
936
- dtype=prompt_embeds.dtype,
937
- text_encoder_projection_dim=text_encoder_projection_dim,
938
- )
939
- add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
940
-
941
- if self.do_classifier_free_guidance:
942
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
943
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
944
- add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
945
- add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
946
-
947
- prompt_embeds = prompt_embeds.to(device)
948
- add_text_embeds = add_text_embeds.to(device)
949
- add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
950
- encoder_hidden_states = torch.cat([prompt_embeds, prompt_image_emb], dim=1)
951
-
952
- # 8. Denoising loop
953
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
954
- is_unet_compiled = is_compiled_module(self.unet)
955
- is_controlnet_compiled = is_compiled_module(self.controlnet)
956
- is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
957
-
958
- with self.progress_bar(total=num_inference_steps) as progress_bar:
959
- for i, t in enumerate(timesteps):
960
- # Relevant thread:
961
- # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
962
- if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
963
- torch._inductor.cudagraph_mark_step_begin()
964
- # expand the latents if we are doing classifier free guidance
965
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
966
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
967
-
968
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
969
-
970
- # controlnet(s) inference
971
- if guess_mode and self.do_classifier_free_guidance:
972
- # Infer ControlNet only for the conditional batch.
973
- control_model_input = latents
974
- control_model_input = self.scheduler.scale_model_input(control_model_input, t)
975
- controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
976
- controlnet_added_cond_kwargs = {
977
- "text_embeds": add_text_embeds.chunk(2)[1],
978
- "time_ids": add_time_ids.chunk(2)[1],
979
- }
980
- else:
981
- control_model_input = latent_model_input
982
- controlnet_prompt_embeds = prompt_embeds
983
- controlnet_added_cond_kwargs = added_cond_kwargs
984
-
985
- if isinstance(controlnet_keep[i], list):
986
- cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
987
- else:
988
- controlnet_cond_scale = controlnet_conditioning_scale
989
- if isinstance(controlnet_cond_scale, list):
990
- controlnet_cond_scale = controlnet_cond_scale[0]
991
- cond_scale = controlnet_cond_scale * controlnet_keep[i]
992
-
993
- down_block_res_samples, mid_block_res_sample = self.controlnet(
994
- control_model_input,
995
- t,
996
- encoder_hidden_states=prompt_image_emb,
997
- controlnet_cond=control_image,
998
- conditioning_scale=cond_scale,
999
- guess_mode=guess_mode,
1000
- added_cond_kwargs=controlnet_added_cond_kwargs,
1001
- return_dict=False,
1002
- )
1003
-
1004
- if guess_mode and self.do_classifier_free_guidance:
1005
- # Inferred ControlNet only for the conditional batch.
1006
- # To apply the output of ControlNet to both the unconditional and conditional batches,
1007
- # add 0 to the unconditional batch to keep it unchanged.
1008
- down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1009
- mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1010
-
1011
- # predict the noise residual
1012
- noise_pred = self.unet(
1013
- latent_model_input,
1014
- t,
1015
- encoder_hidden_states=encoder_hidden_states,
1016
- timestep_cond=timestep_cond,
1017
- cross_attention_kwargs=self.cross_attention_kwargs,
1018
- down_block_additional_residuals=down_block_res_samples,
1019
- mid_block_additional_residual=mid_block_res_sample,
1020
- added_cond_kwargs=added_cond_kwargs,
1021
- return_dict=False,
1022
- )[0]
1023
-
1024
- # perform guidance
1025
- if self.do_classifier_free_guidance:
1026
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1027
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1028
-
1029
- # compute the previous noisy sample x_t -> x_t-1
1030
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1031
-
1032
- if callback_on_step_end is not None:
1033
- callback_kwargs = {}
1034
- for k in callback_on_step_end_tensor_inputs:
1035
- callback_kwargs[k] = locals()[k]
1036
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1037
-
1038
- latents = callback_outputs.pop("latents", latents)
1039
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1040
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1041
-
1042
- # call the callback, if provided
1043
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1044
- progress_bar.update()
1045
- if callback is not None and i % callback_steps == 0:
1046
- step_idx = i // getattr(self.scheduler, "order", 1)
1047
- callback(step_idx, t, latents)
1048
-
1049
- if not output_type == "latent":
1050
- # make sure the VAE is in float32 mode, as it overflows in float16
1051
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1052
- if needs_upcasting:
1053
- self.upcast_vae()
1054
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1055
-
1056
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1057
-
1058
- # cast back to fp16 if needed
1059
- if needs_upcasting:
1060
- self.vae.to(dtype=torch.float16)
1061
- else:
1062
- image = latents
1063
-
1064
- if not output_type == "latent":
1065
- # apply watermark if available
1066
- if self.watermark is not None:
1067
- image = self.watermark.apply_watermark(image)
1068
-
1069
- image = self.image_processor.postprocess(image, output_type=output_type)
1070
-
1071
- # Offload all models
1072
- self.maybe_free_model_hooks()
1073
-
1074
- if not return_dict:
1075
- return (image,)
1076
-
1077
- return StableDiffusionXLPipelineOutput(images=image)
 
pipeline_stable_diffusion_xl_instantid.py DELETED
@@ -1,1066 +0,0 @@
1
- # Copyright 2024 The InstantX Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- import math
17
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
-
19
- import cv2
20
- import numpy as np
21
- import PIL.Image
22
- import torch
23
- import torch.nn as nn
24
-
25
- from diffusers import StableDiffusionXLControlNetPipeline
26
- from diffusers.image_processor import PipelineImageInput
27
- from diffusers.models import ControlNetModel
28
- from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
29
- from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
30
- from diffusers.utils import (
31
- deprecate,
32
- logging,
33
- replace_example_docstring,
34
- )
35
- from diffusers.utils.import_utils import is_xformers_available
36
- from diffusers.utils.torch_utils import is_compiled_module, is_torch_version
37
-
38
-
39
- try:
40
- import xformers
41
- import xformers.ops
42
-
43
- xformers_available = True
44
- except Exception:
45
- xformers_available = False
46
-
47
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
48
-
49
- logger.warning(
50
- "To use instant id pipelines, please make sure you have the `insightface` library installed: `pip install insightface`."
51
- "Please refer to: https://huggingface.co/InstantX/InstantID for further instructions regarding inference"
52
- )
53
-
54
-
55
- def FeedForward(dim, mult=4):
56
- inner_dim = int(dim * mult)
57
- return nn.Sequential(
58
- nn.LayerNorm(dim),
59
- nn.Linear(dim, inner_dim, bias=False),
60
- nn.GELU(),
61
- nn.Linear(inner_dim, dim, bias=False),
62
- )
63
-
64
-
65
- def reshape_tensor(x, heads):
66
- bs, length, width = x.shape
67
- # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
68
- x = x.view(bs, length, heads, -1)
69
- # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
70
- x = x.transpose(1, 2)
71
- # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head)
72
- x = x.reshape(bs, heads, length, -1)
73
- return x
74
-
75
-
76
- class PerceiverAttention(nn.Module):
77
- def __init__(self, *, dim, dim_head=64, heads=8):
78
- super().__init__()
79
- self.scale = dim_head**-0.5
80
- self.dim_head = dim_head
81
- self.heads = heads
82
- inner_dim = dim_head * heads
83
-
84
- self.norm1 = nn.LayerNorm(dim)
85
- self.norm2 = nn.LayerNorm(dim)
86
-
87
- self.to_q = nn.Linear(dim, inner_dim, bias=False)
88
- self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
89
- self.to_out = nn.Linear(inner_dim, dim, bias=False)
90
-
91
- def forward(self, x, latents):
92
- """
93
- Args:
94
- x (torch.Tensor): image features
95
- shape (b, n1, D)
96
- latent (torch.Tensor): latent features
97
- shape (b, n2, D)
98
- """
99
- x = self.norm1(x)
100
- latents = self.norm2(latents)
101
-
102
- b, l, _ = latents.shape
103
-
104
- q = self.to_q(latents)
105
- kv_input = torch.cat((x, latents), dim=-2)
106
- k, v = self.to_kv(kv_input).chunk(2, dim=-1)
107
-
108
- q = reshape_tensor(q, self.heads)
109
- k = reshape_tensor(k, self.heads)
110
- v = reshape_tensor(v, self.heads)
111
-
112
- # attention
113
- scale = 1 / math.sqrt(math.sqrt(self.dim_head))
114
- weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
115
- weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
116
- out = weight @ v
117
-
118
- out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
119
-
120
- return self.to_out(out)
121
-
122
-
123
- class Resampler(nn.Module):
124
- def __init__(
125
- self,
126
- dim=1024,
127
- depth=8,
128
- dim_head=64,
129
- heads=16,
130
- num_queries=8,
131
- embedding_dim=768,
132
- output_dim=1024,
133
- ff_mult=4,
134
- ):
135
- super().__init__()
136
-
137
- self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
138
-
139
- self.proj_in = nn.Linear(embedding_dim, dim)
140
-
141
- self.proj_out = nn.Linear(dim, output_dim)
142
- self.norm_out = nn.LayerNorm(output_dim)
143
-
144
- self.layers = nn.ModuleList([])
145
- for _ in range(depth):
146
- self.layers.append(
147
- nn.ModuleList(
148
- [
149
- PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
150
- FeedForward(dim=dim, mult=ff_mult),
151
- ]
152
- )
153
- )
154
-
155
- def forward(self, x):
156
- latents = self.latents.repeat(x.size(0), 1, 1)
157
- x = self.proj_in(x)
158
-
159
- for attn, ff in self.layers:
160
- latents = attn(x, latents) + latents
161
- latents = ff(latents) + latents
162
-
163
- latents = self.proj_out(latents)
164
- return self.norm_out(latents)
165
-
166
-
167
- class AttnProcessor(nn.Module):
168
- r"""
169
- Default processor for performing attention-related computations.
170
- """
171
-
172
- def __init__(
173
- self,
174
- hidden_size=None,
175
- cross_attention_dim=None,
176
- ):
177
- super().__init__()
178
-
179
- def __call__(
180
- self,
181
- attn,
182
- hidden_states,
183
- encoder_hidden_states=None,
184
- attention_mask=None,
185
- temb=None,
186
- ):
187
- residual = hidden_states
188
-
189
- if attn.spatial_norm is not None:
190
- hidden_states = attn.spatial_norm(hidden_states, temb)
191
-
192
- input_ndim = hidden_states.ndim
193
-
194
- if input_ndim == 4:
195
- batch_size, channel, height, width = hidden_states.shape
196
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
197
-
198
- batch_size, sequence_length, _ = (
199
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
200
- )
201
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
202
-
203
- if attn.group_norm is not None:
204
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
205
-
206
- query = attn.to_q(hidden_states)
207
-
208
- if encoder_hidden_states is None:
209
- encoder_hidden_states = hidden_states
210
- elif attn.norm_cross:
211
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
212
-
213
- key = attn.to_k(encoder_hidden_states)
214
- value = attn.to_v(encoder_hidden_states)
215
-
216
- query = attn.head_to_batch_dim(query)
217
- key = attn.head_to_batch_dim(key)
218
- value = attn.head_to_batch_dim(value)
219
-
220
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
221
- hidden_states = torch.bmm(attention_probs, value)
222
- hidden_states = attn.batch_to_head_dim(hidden_states)
223
-
224
- # linear proj
225
- hidden_states = attn.to_out[0](hidden_states)
226
- # dropout
227
- hidden_states = attn.to_out[1](hidden_states)
228
-
229
- if input_ndim == 4:
230
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
231
-
232
- if attn.residual_connection:
233
- hidden_states = hidden_states + residual
234
-
235
- hidden_states = hidden_states / attn.rescale_output_factor
236
-
237
- return hidden_states
238
-
239
-
240
- class IPAttnProcessor(nn.Module):
241
- r"""
242
- Attention processor for IP-Adapter.
243
- Args:
244
- hidden_size (`int`):
245
- The hidden size of the attention layer.
246
- cross_attention_dim (`int`):
247
- The number of channels in the `encoder_hidden_states`.
248
- scale (`float`, defaults to 1.0):
249
- the weight scale of image prompt.
250
- num_tokens (`int`, defaults to 4; should be 16 when using ip_adapter_plus):
251
- The context length of the image features.
252
- """
253
-
254
- def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4):
255
- super().__init__()
256
-
257
- self.hidden_size = hidden_size
258
- self.cross_attention_dim = cross_attention_dim
259
- self.scale = scale
260
- self.num_tokens = num_tokens
261
-
262
- self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
263
- self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
264
-
265
- def __call__(
266
- self,
267
- attn,
268
- hidden_states,
269
- encoder_hidden_states=None,
270
- attention_mask=None,
271
- temb=None,
272
- ):
273
- residual = hidden_states
274
-
275
- if attn.spatial_norm is not None:
276
- hidden_states = attn.spatial_norm(hidden_states, temb)
277
-
278
- input_ndim = hidden_states.ndim
279
-
280
- if input_ndim == 4:
281
- batch_size, channel, height, width = hidden_states.shape
282
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
283
-
284
- batch_size, sequence_length, _ = (
285
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
286
- )
287
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
288
-
289
- if attn.group_norm is not None:
290
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
291
-
292
- query = attn.to_q(hidden_states)
293
-
294
- if encoder_hidden_states is None:
295
- encoder_hidden_states = hidden_states
296
- else:
297
- # get encoder_hidden_states, ip_hidden_states
298
- end_pos = encoder_hidden_states.shape[1] - self.num_tokens
299
- encoder_hidden_states, ip_hidden_states = (
300
- encoder_hidden_states[:, :end_pos, :],
301
- encoder_hidden_states[:, end_pos:, :],
302
- )
303
- if attn.norm_cross:
304
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
305
-
306
- key = attn.to_k(encoder_hidden_states)
307
- value = attn.to_v(encoder_hidden_states)
308
-
309
- query = attn.head_to_batch_dim(query)
310
- key = attn.head_to_batch_dim(key)
311
- value = attn.head_to_batch_dim(value)
312
-
313
- if xformers_available:
314
- hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
315
- else:
316
- attention_probs = attn.get_attention_scores(query, key, attention_mask)
317
- hidden_states = torch.bmm(attention_probs, value)
318
- hidden_states = attn.batch_to_head_dim(hidden_states)
319
-
320
- # for ip-adapter
321
- ip_key = self.to_k_ip(ip_hidden_states)
322
- ip_value = self.to_v_ip(ip_hidden_states)
323
-
324
- ip_key = attn.head_to_batch_dim(ip_key)
325
- ip_value = attn.head_to_batch_dim(ip_value)
326
-
327
- if xformers_available:
328
- ip_hidden_states = self._memory_efficient_attention_xformers(query, ip_key, ip_value, None)
329
- else:
330
- ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
331
- ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
332
- ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)
333
-
334
- hidden_states = hidden_states + self.scale * ip_hidden_states
335
-
336
- # linear proj
337
- hidden_states = attn.to_out[0](hidden_states)
338
- # dropout
339
- hidden_states = attn.to_out[1](hidden_states)
340
-
341
- if input_ndim == 4:
342
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
343
-
344
- if attn.residual_connection:
345
- hidden_states = hidden_states + residual
346
-
347
- hidden_states = hidden_states / attn.rescale_output_factor
348
-
349
- return hidden_states
350
-
351
- def _memory_efficient_attention_xformers(self, query, key, value, attention_mask):
352
- # TODO attention_mask
353
- query = query.contiguous()
354
- key = key.contiguous()
355
- value = value.contiguous()
356
- hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask)
357
- return hidden_states
358
-
359
-
360
- EXAMPLE_DOC_STRING = """
361
- Examples:
362
- ```py
363
- >>> # !pip install opencv-python transformers accelerate insightface
364
- >>> import diffusers
365
- >>> from diffusers.utils import load_image
366
- >>> from diffusers.models import ControlNetModel
367
-
368
- >>> import cv2
369
- >>> import torch
370
- >>> import numpy as np
371
- >>> from PIL import Image
372
-
373
- >>> from insightface.app import FaceAnalysis
374
- >>> from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps
375
-
376
- >>> # download 'antelopev2' under ./models
377
- >>> app = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
378
- >>> app.prepare(ctx_id=0, det_size=(640, 640))
379
-
380
- >>> # download models under ./checkpoints
381
- >>> face_adapter = f'./checkpoints/ip-adapter.bin'
382
- >>> controlnet_path = f'./checkpoints/ControlNetModel'
383
-
384
- >>> # load IdentityNet
385
- >>> controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
386
-
387
- >>> pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
388
- ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
389
- ... )
390
- >>> pipe.cuda()
391
-
392
- >>> # load adapter
393
- >>> pipe.load_ip_adapter_instantid(face_adapter)
394
-
395
- >>> prompt = "analog film photo of a man. faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage, masterpiece, best quality"
396
- >>> negative_prompt = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured"
397
-
398
- >>> # load an image
399
- >>> face_image = load_image("your-example.jpg")
400
-
401
- >>> face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))[-1]
402
- >>> face_emb = face_info['embedding']
403
- >>> face_kps = draw_kps(face_image, face_info['kps'])
404
-
405
- >>> pipe.set_ip_adapter_scale(0.8)
406
-
407
- >>> # generate image
408
- >>> image = pipe(
409
- ... prompt, image_embeds=face_emb, image=face_kps, controlnet_conditioning_scale=0.8
410
- ... ).images[0]
411
- ```
412
- """
413
-
414
-
415
- def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]):
416
- stickwidth = 4
417
- limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
418
- kps = np.array(kps)
419
-
420
- w, h = image_pil.size
421
- out_img = np.zeros([h, w, 3])
422
-
423
- for i in range(len(limbSeq)):
424
- index = limbSeq[i]
425
- color = color_list[index[0]]
426
-
427
- x = kps[index][:, 0]
428
- y = kps[index][:, 1]
429
- length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
430
- angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
431
- polygon = cv2.ellipse2Poly(
432
- (int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1
433
- )
434
- out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
435
- out_img = (out_img * 0.6).astype(np.uint8)
436
-
437
- for idx_kp, kp in enumerate(kps):
438
- color = color_list[idx_kp]
439
- x, y = kp
440
- out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
441
-
442
- out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8))
443
- return out_img_pil
444
-
445
-
446
- class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
447
- def cuda(self, dtype=torch.float16, use_xformers=False):
448
- self.to("cuda", dtype)
449
-
450
- if hasattr(self, "image_proj_model"):
451
- self.image_proj_model.to(self.unet.device).to(self.unet.dtype)
452
-
453
- if use_xformers:
454
- if is_xformers_available():
455
- import xformers
456
- from packaging import version
457
-
458
- xformers_version = version.parse(xformers.__version__)
459
- if xformers_version == version.parse("0.0.16"):
460
- logger.warning(
461
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
462
- )
463
- self.enable_xformers_memory_efficient_attention()
464
- else:
465
- raise ValueError("xformers is not available. Make sure it is installed correctly")
466
-
467
- def load_ip_adapter_instantid(self, model_ckpt, image_emb_dim=512, num_tokens=16, scale=0.5):
468
- self.set_image_proj_model(model_ckpt, image_emb_dim, num_tokens)
469
- self.set_ip_adapter(model_ckpt, num_tokens, scale)
470
-
471
- def set_image_proj_model(self, model_ckpt, image_emb_dim=512, num_tokens=16):
472
- image_proj_model = Resampler(
473
- dim=1280,
474
- depth=4,
475
- dim_head=64,
476
- heads=20,
477
- num_queries=num_tokens,
478
- embedding_dim=image_emb_dim,
479
- output_dim=self.unet.config.cross_attention_dim,
480
- ff_mult=4,
481
- )
482
-
483
- image_proj_model.eval()
484
-
485
- self.image_proj_model = image_proj_model.to(self.device, dtype=self.dtype)
486
- state_dict = torch.load(model_ckpt, map_location="cpu")
487
- if "image_proj" in state_dict:
488
- state_dict = state_dict["image_proj"]
489
- self.image_proj_model.load_state_dict(state_dict)
490
-
491
- self.image_proj_model_in_features = image_emb_dim
492
-
493
- def set_ip_adapter(self, model_ckpt, num_tokens, scale):
494
- unet = self.unet
495
- attn_procs = {}
496
- for name in unet.attn_processors.keys():
497
- cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
498
- if name.startswith("mid_block"):
499
- hidden_size = unet.config.block_out_channels[-1]
500
- elif name.startswith("up_blocks"):
501
- block_id = int(name[len("up_blocks.")])
502
- hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
503
- elif name.startswith("down_blocks"):
504
- block_id = int(name[len("down_blocks.")])
505
- hidden_size = unet.config.block_out_channels[block_id]
506
- if cross_attention_dim is None:
507
- attn_procs[name] = AttnProcessor().to(unet.device, dtype=unet.dtype)
508
- else:
509
- attn_procs[name] = IPAttnProcessor(
510
- hidden_size=hidden_size,
511
- cross_attention_dim=cross_attention_dim,
512
- scale=scale,
513
- num_tokens=num_tokens,
514
- ).to(unet.device, dtype=unet.dtype)
515
- unet.set_attn_processor(attn_procs)
516
-
517
- state_dict = torch.load(model_ckpt, map_location="cpu")
518
- ip_layers = torch.nn.ModuleList(self.unet.attn_processors.values())
519
- if "ip_adapter" in state_dict:
520
- state_dict = state_dict["ip_adapter"]
521
- ip_layers.load_state_dict(state_dict)
522
-
523
- def set_ip_adapter_scale(self, scale):
524
- unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
525
- for attn_processor in unet.attn_processors.values():
526
- if isinstance(attn_processor, IPAttnProcessor):
527
- attn_processor.scale = scale
528
-
529
- def _encode_prompt_image_emb(self, prompt_image_emb, device, dtype, do_classifier_free_guidance):
530
- if isinstance(prompt_image_emb, torch.Tensor):
531
- prompt_image_emb = prompt_image_emb.clone().detach()
532
- else:
533
- prompt_image_emb = torch.tensor(prompt_image_emb)
534
-
535
- prompt_image_emb = prompt_image_emb.to(device=device, dtype=dtype)
536
- prompt_image_emb = prompt_image_emb.reshape([1, -1, self.image_proj_model_in_features])
537
-
538
- if do_classifier_free_guidance:
539
- prompt_image_emb = torch.cat([torch.zeros_like(prompt_image_emb), prompt_image_emb], dim=0)
540
- else:
541
- prompt_image_emb = torch.cat([prompt_image_emb], dim=0)
542
-
543
- prompt_image_emb = self.image_proj_model(prompt_image_emb)
544
- return prompt_image_emb
545
-
546
- @torch.no_grad()
547
- @replace_example_docstring(EXAMPLE_DOC_STRING)
548
- def __call__(
549
- self,
550
- prompt: Union[str, List[str]] = None,
551
- prompt_2: Optional[Union[str, List[str]]] = None,
552
- image: PipelineImageInput = None,
553
- height: Optional[int] = None,
554
- width: Optional[int] = None,
555
- num_inference_steps: int = 50,
556
- guidance_scale: float = 5.0,
557
- negative_prompt: Optional[Union[str, List[str]]] = None,
558
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
559
- num_images_per_prompt: Optional[int] = 1,
560
- eta: float = 0.0,
561
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
562
- latents: Optional[torch.Tensor] = None,
563
- prompt_embeds: Optional[torch.Tensor] = None,
564
- negative_prompt_embeds: Optional[torch.Tensor] = None,
565
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
566
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
567
- image_embeds: Optional[torch.Tensor] = None,
568
- output_type: Optional[str] = "pil",
569
- return_dict: bool = True,
570
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
571
- controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
572
- guess_mode: bool = False,
573
- control_guidance_start: Union[float, List[float]] = 0.0,
574
- control_guidance_end: Union[float, List[float]] = 1.0,
575
- original_size: Tuple[int, int] = None,
576
- crops_coords_top_left: Tuple[int, int] = (0, 0),
577
- target_size: Tuple[int, int] = None,
578
- negative_original_size: Optional[Tuple[int, int]] = None,
579
- negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
580
- negative_target_size: Optional[Tuple[int, int]] = None,
581
- clip_skip: Optional[int] = None,
582
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
583
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
584
- **kwargs,
585
- ):
586
- r"""
587
- The call function to the pipeline for generation.
588
-
589
- Args:
590
- prompt (`str` or `List[str]`, *optional*):
591
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
592
- prompt_2 (`str` or `List[str]`, *optional*):
593
- The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
594
- used in both text-encoders.
595
- image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
596
- `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
597
- The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
598
- specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
599
- accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height
600
- and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
601
- `init`, images must be passed as a list such that each element of the list can be correctly batched for
602
- input to a single ControlNet.
603
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
604
- The height in pixels of the generated image. Anything below 512 pixels won't work well for
605
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
606
- and checkpoints that are not specifically fine-tuned on low resolutions.
607
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
608
- The width in pixels of the generated image. Anything below 512 pixels won't work well for
609
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
610
- and checkpoints that are not specifically fine-tuned on low resolutions.
611
- num_inference_steps (`int`, *optional*, defaults to 50):
612
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
613
- expense of slower inference.
614
- guidance_scale (`float`, *optional*, defaults to 5.0):
615
- A higher guidance scale value encourages the model to generate images closely linked to the text
616
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
617
- negative_prompt (`str` or `List[str]`, *optional*):
618
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
619
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
620
- negative_prompt_2 (`str` or `List[str]`, *optional*):
621
- The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
622
- and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
623
- num_images_per_prompt (`int`, *optional*, defaults to 1):
624
- The number of images to generate per prompt.
625
- eta (`float`, *optional*, defaults to 0.0):
626
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
627
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
628
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
629
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
630
- generation deterministic.
631
- latents (`torch.Tensor`, *optional*):
632
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
633
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
634
- tensor is generated by sampling using the supplied random `generator`.
635
- prompt_embeds (`torch.Tensor`, *optional*):
636
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
637
- provided, text embeddings are generated from the `prompt` input argument.
638
- negative_prompt_embeds (`torch.Tensor`, *optional*):
639
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
640
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
641
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
642
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
643
- not provided, pooled text embeddings are generated from `prompt` input argument.
644
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
645
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
646
- weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
647
- argument.
648
- image_embeds (`torch.Tensor`, *optional*):
649
- Pre-generated image embeddings.
650
- output_type (`str`, *optional*, defaults to `"pil"`):
651
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
652
- return_dict (`bool`, *optional*, defaults to `True`):
653
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
654
- plain tuple.
655
- cross_attention_kwargs (`dict`, *optional*):
656
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
657
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
658
- controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
659
- The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
660
- to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
661
- the corresponding scale as a list.
662
- guess_mode (`bool`, *optional*, defaults to `False`):
663
- The ControlNet encoder tries to recognize the content of the input image even if you remove all
664
- prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
665
- control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
666
- The percentage of total steps at which the ControlNet starts applying.
667
- control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
668
- The percentage of total steps at which the ControlNet stops applying.
669
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
670
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
671
- `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
672
- explained in section 2.2 of
673
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
674
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
675
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
676
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
677
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
678
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
679
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
680
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
681
- not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
682
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
683
- negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
684
- To negatively condition the generation process based on a specific image resolution. Part of SDXL's
685
- micro-conditioning as explained in section 2.2 of
686
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
687
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
688
- negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
689
- To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
690
- micro-conditioning as explained in section 2.2 of
691
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
692
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
693
- negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
694
- To negatively condition the generation process based on a target image resolution. It should be the same
695
- as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
696
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
697
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
698
- clip_skip (`int`, *optional*):
699
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
700
- the output of the pre-final layer will be used for computing the prompt embeddings.
701
- callback_on_step_end (`Callable`, *optional*):
702
- A function that is called at the end of each denoising step during inference. The function is called
703
- with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
704
- callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
705
- `callback_on_step_end_tensor_inputs`.
706
- callback_on_step_end_tensor_inputs (`List`, *optional*):
707
- The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
708
- will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
709
- `._callback_tensor_inputs` attribute of your pipeline class.
710
-
711
- Examples:
712
-
713
- Returns:
714
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
715
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
716
- otherwise a `tuple` is returned containing the output images.
717
- """
718
-
719
- callback = kwargs.pop("callback", None)
720
- callback_steps = kwargs.pop("callback_steps", None)
721
-
722
- if callback is not None:
723
- deprecate(
724
- "callback",
725
- "1.0.0",
726
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
727
- )
728
- if callback_steps is not None:
729
- deprecate(
730
- "callback_steps",
731
- "1.0.0",
732
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
733
- )
734
-
735
- controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
736
-
737
- # align format for control guidance
738
- if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
739
- control_guidance_start = len(control_guidance_end) * [control_guidance_start]
740
- elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
741
- control_guidance_end = len(control_guidance_start) * [control_guidance_end]
742
- elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
743
- mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
744
- control_guidance_start, control_guidance_end = (
745
- mult * [control_guidance_start],
746
- mult * [control_guidance_end],
747
- )
748
-
749
- # 1. Check inputs. Raise error if not correct
750
- self.check_inputs(
751
- prompt,
752
- prompt_2,
753
- image,
754
- callback_steps,
755
- negative_prompt,
756
- negative_prompt_2,
757
- prompt_embeds,
758
- negative_prompt_embeds,
759
- pooled_prompt_embeds,
760
- negative_pooled_prompt_embeds,
761
- controlnet_conditioning_scale,
762
- control_guidance_start,
763
- control_guidance_end,
764
- callback_on_step_end_tensor_inputs,
765
- )
766
-
767
- self._guidance_scale = guidance_scale
768
- self._clip_skip = clip_skip
769
- self._cross_attention_kwargs = cross_attention_kwargs
770
-
771
- # 2. Define call parameters
772
- if prompt is not None and isinstance(prompt, str):
773
- batch_size = 1
774
- elif prompt is not None and isinstance(prompt, list):
775
- batch_size = len(prompt)
776
- else:
777
- batch_size = prompt_embeds.shape[0]
778
-
779
- device = self._execution_device
780
-
781
- if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
782
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
783
-
784
- global_pool_conditions = (
785
- controlnet.config.global_pool_conditions
786
- if isinstance(controlnet, ControlNetModel)
787
- else controlnet.nets[0].config.global_pool_conditions
788
- )
789
- guess_mode = guess_mode or global_pool_conditions
790
-
791
- # 3.1 Encode input prompt
792
- text_encoder_lora_scale = (
793
- self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
794
- )
795
- (
796
- prompt_embeds,
797
- negative_prompt_embeds,
798
- pooled_prompt_embeds,
799
- negative_pooled_prompt_embeds,
800
- ) = self.encode_prompt(
801
- prompt,
802
- prompt_2,
803
- device,
804
- num_images_per_prompt,
805
- self.do_classifier_free_guidance,
806
- negative_prompt,
807
- negative_prompt_2,
808
- prompt_embeds=prompt_embeds,
809
- negative_prompt_embeds=negative_prompt_embeds,
810
- pooled_prompt_embeds=pooled_prompt_embeds,
811
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
812
- lora_scale=text_encoder_lora_scale,
813
- clip_skip=self.clip_skip,
814
- )
815
-
816
- # 3.2 Encode image prompt
817
- prompt_image_emb = self._encode_prompt_image_emb(
818
- image_embeds, device, self.unet.dtype, self.do_classifier_free_guidance
819
- )
820
- bs_embed, seq_len, _ = prompt_image_emb.shape
821
- prompt_image_emb = prompt_image_emb.repeat(1, num_images_per_prompt, 1)
822
- prompt_image_emb = prompt_image_emb.view(bs_embed * num_images_per_prompt, seq_len, -1)
823
-
824
- # 4. Prepare image
825
- if isinstance(controlnet, ControlNetModel):
826
- image = self.prepare_image(
827
- image=image,
828
- width=width,
829
- height=height,
830
- batch_size=batch_size * num_images_per_prompt,
831
- num_images_per_prompt=num_images_per_prompt,
832
- device=device,
833
- dtype=controlnet.dtype,
834
- do_classifier_free_guidance=self.do_classifier_free_guidance,
835
- guess_mode=guess_mode,
836
- )
837
- height, width = image.shape[-2:]
838
- elif isinstance(controlnet, MultiControlNetModel):
839
- images = []
840
-
841
- for image_ in image:
842
- image_ = self.prepare_image(
843
- image=image_,
844
- width=width,
845
- height=height,
846
- batch_size=batch_size * num_images_per_prompt,
847
- num_images_per_prompt=num_images_per_prompt,
848
- device=device,
849
- dtype=controlnet.dtype,
850
- do_classifier_free_guidance=self.do_classifier_free_guidance,
851
- guess_mode=guess_mode,
852
- )
853
-
854
- images.append(image_)
855
-
856
- image = images
857
- height, width = image[0].shape[-2:]
858
- else:
859
- assert False
860
-
861
- # 5. Prepare timesteps
862
- self.scheduler.set_timesteps(num_inference_steps, device=device)
863
- timesteps = self.scheduler.timesteps
864
- self._num_timesteps = len(timesteps)
865
-
866
- # 6. Prepare latent variables
867
- num_channels_latents = self.unet.config.in_channels
868
- latents = self.prepare_latents(
869
- batch_size * num_images_per_prompt,
870
- num_channels_latents,
871
- height,
872
- width,
873
- prompt_embeds.dtype,
874
- device,
875
- generator,
876
- latents,
877
- )
878
-
879
- # 6.5 Optionally get Guidance Scale Embedding
880
- timestep_cond = None
881
- if self.unet.config.time_cond_proj_dim is not None:
882
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
883
- timestep_cond = self.get_guidance_scale_embedding(
884
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
885
- ).to(device=device, dtype=latents.dtype)
886
-
887
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
888
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
889
-
890
- # 7.1 Create tensor stating which controlnets to keep
891
- controlnet_keep = []
892
- for i in range(len(timesteps)):
893
- keeps = [
894
- 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
895
- for s, e in zip(control_guidance_start, control_guidance_end)
896
- ]
897
- controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
898
-
899
- # 7.2 Prepare added time ids & embeddings
900
- if isinstance(image, list):
901
- original_size = original_size or image[0].shape[-2:]
902
- else:
903
- original_size = original_size or image.shape[-2:]
904
- target_size = target_size or (height, width)
905
-
906
- add_text_embeds = pooled_prompt_embeds
907
- if self.text_encoder_2 is None:
908
- text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
909
- else:
910
- text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
911
-
912
- add_time_ids = self._get_add_time_ids(
913
- original_size,
914
- crops_coords_top_left,
915
- target_size,
916
- dtype=prompt_embeds.dtype,
917
- text_encoder_projection_dim=text_encoder_projection_dim,
918
- )
919
-
920
- if negative_original_size is not None and negative_target_size is not None:
921
- negative_add_time_ids = self._get_add_time_ids(
922
- negative_original_size,
923
- negative_crops_coords_top_left,
924
- negative_target_size,
925
- dtype=prompt_embeds.dtype,
926
- text_encoder_projection_dim=text_encoder_projection_dim,
927
- )
928
- else:
929
- negative_add_time_ids = add_time_ids
930
-
931
- if self.do_classifier_free_guidance:
932
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
933
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
934
- add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
935
-
936
- prompt_embeds = prompt_embeds.to(device)
937
- add_text_embeds = add_text_embeds.to(device)
938
- add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
939
- encoder_hidden_states = torch.cat([prompt_embeds, prompt_image_emb], dim=1)
940
-
941
- # 8. Denoising loop
942
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
943
- is_unet_compiled = is_compiled_module(self.unet)
944
- is_controlnet_compiled = is_compiled_module(self.controlnet)
945
- is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
946
-
947
- with self.progress_bar(total=num_inference_steps) as progress_bar:
948
- for i, t in enumerate(timesteps):
949
- # Relevant thread:
950
- # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
951
- if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
952
- torch._inductor.cudagraph_mark_step_begin()
953
- # expand the latents if we are doing classifier free guidance
954
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
955
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
956
-
957
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
958
-
959
- # controlnet(s) inference
960
- if guess_mode and self.do_classifier_free_guidance:
961
- # Infer ControlNet only for the conditional batch.
962
- control_model_input = latents
963
- control_model_input = self.scheduler.scale_model_input(control_model_input, t)
964
- controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
965
- controlnet_added_cond_kwargs = {
966
- "text_embeds": add_text_embeds.chunk(2)[1],
967
- "time_ids": add_time_ids.chunk(2)[1],
968
- }
969
- else:
970
- control_model_input = latent_model_input
971
- controlnet_prompt_embeds = prompt_embeds
972
- controlnet_added_cond_kwargs = added_cond_kwargs
973
-
974
- if isinstance(controlnet_keep[i], list):
975
- cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
976
- else:
977
- controlnet_cond_scale = controlnet_conditioning_scale
978
- if isinstance(controlnet_cond_scale, list):
979
- controlnet_cond_scale = controlnet_cond_scale[0]
980
- cond_scale = controlnet_cond_scale * controlnet_keep[i]
981
-
982
- down_block_res_samples, mid_block_res_sample = self.controlnet(
983
- control_model_input,
984
- t,
985
- encoder_hidden_states=prompt_image_emb,
986
- controlnet_cond=image,
987
- conditioning_scale=cond_scale,
988
- guess_mode=guess_mode,
989
- added_cond_kwargs=controlnet_added_cond_kwargs,
990
- return_dict=False,
991
- )
992
-
993
- if guess_mode and self.do_classifier_free_guidance:
994
- # Inferred ControlNet only for the conditional batch.
995
- # To apply the output of ControlNet to both the unconditional and conditional batches,
996
- # add 0 to the unconditional batch to keep it unchanged.
997
- down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
998
- mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
999
-
1000
- # predict the noise residual
1001
- noise_pred = self.unet(
1002
- latent_model_input,
1003
- t,
1004
- encoder_hidden_states=encoder_hidden_states,
1005
- timestep_cond=timestep_cond,
1006
- cross_attention_kwargs=self.cross_attention_kwargs,
1007
- down_block_additional_residuals=down_block_res_samples,
1008
- mid_block_additional_residual=mid_block_res_sample,
1009
- added_cond_kwargs=added_cond_kwargs,
1010
- return_dict=False,
1011
- )[0]
1012
-
1013
- # perform guidance
1014
- if self.do_classifier_free_guidance:
1015
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1016
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1017
-
1018
- # compute the previous noisy sample x_t -> x_t-1
1019
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1020
-
1021
- if callback_on_step_end is not None:
1022
- callback_kwargs = {}
1023
- for k in callback_on_step_end_tensor_inputs:
1024
- callback_kwargs[k] = locals()[k]
1025
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1026
-
1027
- latents = callback_outputs.pop("latents", latents)
1028
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1029
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1030
-
1031
- # call the callback, if provided
1032
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1033
- progress_bar.update()
1034
- if callback is not None and i % callback_steps == 0:
1035
- step_idx = i // getattr(self.scheduler, "order", 1)
1036
- callback(step_idx, t, latents)
1037
-
1038
- if not output_type == "latent":
1039
- # make sure the VAE is in float32 mode, as it overflows in float16
1040
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1041
- if needs_upcasting:
1042
- self.upcast_vae()
1043
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1044
-
1045
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1046
-
1047
- # cast back to fp16 if needed
1048
- if needs_upcasting:
1049
- self.vae.to(dtype=torch.float16)
1050
- else:
1051
- image = latents
1052
-
1053
- if not output_type == "latent":
1054
- # apply watermark if available
1055
- if self.watermark is not None:
1056
- image = self.watermark.apply_watermark(image)
1057
-
1058
- image = self.image_processor.postprocess(image, output_type=output_type)
1059
-
1060
- # Offload all models
1061
- self.maybe_free_model_hooks()
1062
-
1063
- if not return_dict:
1064
- return (image,)
1065
-
1066
- return StableDiffusionXLPipelineOutput(images=image)
 
pipeline_stable_diffusion_xl_ipex.py DELETED
@@ -1,1434 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
-
18
- import intel_extension_for_pytorch as ipex
19
- import torch
20
- from transformers import (
21
- CLIPImageProcessor,
22
- CLIPTextModel,
23
- CLIPTextModelWithProjection,
24
- CLIPTokenizer,
25
- CLIPVisionModelWithProjection,
26
- )
27
-
28
- from diffusers import StableDiffusionXLPipeline
29
- from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
30
- from diffusers.loaders import (
31
- StableDiffusionXLLoraLoaderMixin,
32
- TextualInversionLoaderMixin,
33
- )
34
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
35
- from diffusers.models.attention_processor import (
36
- AttnProcessor2_0,
37
- LoRAAttnProcessor2_0,
38
- LoRAXFormersAttnProcessor,
39
- XFormersAttnProcessor,
40
- )
41
- from diffusers.models.lora import adjust_lora_scale_text_encoder
42
- from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
43
- from diffusers.schedulers import KarrasDiffusionSchedulers
44
- from diffusers.utils import (
45
- USE_PEFT_BACKEND,
46
- deprecate,
47
- is_invisible_watermark_available,
48
- is_torch_xla_available,
49
- logging,
50
- replace_example_docstring,
51
- scale_lora_layers,
52
- unscale_lora_layers,
53
- )
54
- from diffusers.utils.torch_utils import randn_tensor
55
-
56
-
57
- if is_invisible_watermark_available():
58
- from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
59
-
60
- if is_torch_xla_available():
61
- import torch_xla.core.xla_model as xm
62
-
63
- XLA_AVAILABLE = True
64
- else:
65
- XLA_AVAILABLE = False
66
-
67
-
68
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
69
-
70
- EXAMPLE_DOC_STRING = """
71
- Examples:
72
- ```py
73
- >>> import torch
74
- >>> from diffusers import StableDiffusionXLPipelineIpex
75
-
76
- >>> # SDXL-Turbo, a distilled version of SDXL 1.0, trained for real-time synthesis
77
- >>> pipe = StableDiffusionXLPipelineIpex.from_pretrained(
78
- ... "stabilityai/sdxl-turbo", low_cpu_mem_usage=True, use_safetensors=True
79
- ... )
80
-
81
- >>> num_inference_steps = 1
82
- >>> guidance_scale = 0.0
83
- >>> use_bf16 = True
84
- >>> data_type = torch.bfloat16 if use_bf16 else torch.float32
85
- >>> prompt = "a photo of an astronaut riding a horse on mars"
86
-
87
- >>> # value of image height/width should be consistent with the pipeline inference
88
- >>> # For Float32
89
- >>> pipe.prepare_for_ipex(torch.float32, prompt, height=512, width=512)
90
- >>> # For BFloat16
91
- >>> pipe.prepare_for_ipex(torch.bfloat16, prompt, height=512, width=512)
92
-
93
- >>> # value of image height/width should be consistent with 'prepare_for_ipex()'
94
- >>> # For Float32
95
- >>> image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512, guidance_scale=guidance_scale).images[0]
96
- >>> # For BFloat16
97
- >>> with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
98
- >>> image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512, guidance_scale=guidance_scale).images[0]
99
- ```
100
- """
101
-
102
-
103
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
104
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
105
- """
106
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
107
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
108
- """
109
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
110
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
111
- # rescale the results from guidance (fixes overexposure)
112
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
113
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
114
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
115
- return noise_cfg
116
-
117
-
118
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
119
- def retrieve_timesteps(
120
- scheduler,
121
- num_inference_steps: Optional[int] = None,
122
- device: Optional[Union[str, torch.device]] = None,
123
- timesteps: Optional[List[int]] = None,
124
- **kwargs,
125
- ):
126
- """
127
- Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
128
- custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
129
-
130
- Args:
131
- scheduler (`SchedulerMixin`):
132
- The scheduler to get timesteps from.
133
- num_inference_steps (`int`):
134
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
135
- `timesteps` must be `None`.
136
- device (`str` or `torch.device`, *optional*):
137
- The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
138
- timesteps (`List[int]`, *optional*):
139
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
140
- timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
141
- must be `None`.
142
-
143
- Returns:
144
- `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
145
- second element is the number of inference steps.
146
- """
147
- if timesteps is not None:
148
- accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
149
- if not accepts_timesteps:
150
- raise ValueError(
151
- f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
152
- f" timestep schedules. Please check whether you are using the correct scheduler."
153
- )
154
- scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
155
- timesteps = scheduler.timesteps
156
- num_inference_steps = len(timesteps)
157
- else:
158
- scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
159
- timesteps = scheduler.timesteps
160
- return timesteps, num_inference_steps
161
-
162
-
163
- class StableDiffusionXLPipelineIpex(
164
- StableDiffusionXLPipeline,
165
- ):
166
- r"""
167
- Pipeline for text-to-image generation using Stable Diffusion XL on IPEX.
168
-
169
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
170
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
171
-
172
- In addition the pipeline inherits the following loading methods:
173
- - *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`]
174
- - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
175
-
176
- as well as the following saving methods:
177
- - *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`]
178
-
179
- Args:
180
- vae ([`AutoencoderKL`]):
181
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
182
- text_encoder ([`CLIPTextModel`]):
183
- Frozen text-encoder. Stable Diffusion XL uses the text portion of
184
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
185
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
186
- text_encoder_2 ([` CLIPTextModelWithProjection`]):
187
- Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
188
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
189
- specifically the
190
- [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
191
- variant.
192
- tokenizer (`CLIPTokenizer`):
193
- Tokenizer of class
194
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
195
- tokenizer_2 (`CLIPTokenizer`):
196
- Second Tokenizer of class
197
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
198
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
199
- scheduler ([`SchedulerMixin`]):
200
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
201
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
202
- force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
203
- Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
204
- `stabilityai/stable-diffusion-xl-base-1-0`.
205
- add_watermarker (`bool`, *optional*):
206
- Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
207
- watermark output images. If not defined, it will default to True if the package is installed, otherwise no
208
- watermarker will be used.
209
- """
210
-
211
- model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
212
- _optional_components = [
213
- "tokenizer",
214
- "tokenizer_2",
215
- "text_encoder",
216
- "text_encoder_2",
217
- "image_encoder",
218
- "feature_extractor",
219
- ]
220
- _callback_tensor_inputs = [
221
- "latents",
222
- "prompt_embeds",
223
- "negative_prompt_embeds",
224
- "add_text_embeds",
225
- "add_time_ids",
226
- "negative_pooled_prompt_embeds",
227
- "negative_add_time_ids",
228
- ]
229
-
230
- def __init__(
231
- self,
232
- vae: AutoencoderKL,
233
- text_encoder: CLIPTextModel,
234
- text_encoder_2: CLIPTextModelWithProjection,
235
- tokenizer: CLIPTokenizer,
236
- tokenizer_2: CLIPTokenizer,
237
- unet: UNet2DConditionModel,
238
- scheduler: KarrasDiffusionSchedulers,
239
- image_encoder: CLIPVisionModelWithProjection = None,
240
- feature_extractor: CLIPImageProcessor = None,
241
- force_zeros_for_empty_prompt: bool = True,
242
- add_watermarker: Optional[bool] = None,
243
- ):
244
- # super().__init__()
245
-
246
- self.register_modules(
247
- vae=vae,
248
- text_encoder=text_encoder,
249
- text_encoder_2=text_encoder_2,
250
- tokenizer=tokenizer,
251
- tokenizer_2=tokenizer_2,
252
- unet=unet,
253
- scheduler=scheduler,
254
- image_encoder=image_encoder,
255
- feature_extractor=feature_extractor,
256
- )
257
- self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
258
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
259
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
260
-
261
- self.default_sample_size = self.unet.config.sample_size
262
-
263
- add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
264
-
265
- if add_watermarker:
266
- self.watermark = StableDiffusionXLWatermarker()
267
- else:
268
- self.watermark = None
269
-
270
- def encode_prompt(
271
- self,
272
- prompt: str,
273
- prompt_2: Optional[str] = None,
274
- device: Optional[torch.device] = None,
275
- num_images_per_prompt: int = 1,
276
- do_classifier_free_guidance: bool = True,
277
- negative_prompt: Optional[str] = None,
278
- negative_prompt_2: Optional[str] = None,
279
- prompt_embeds: Optional[torch.Tensor] = None,
280
- negative_prompt_embeds: Optional[torch.Tensor] = None,
281
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
282
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
283
- lora_scale: Optional[float] = None,
284
- clip_skip: Optional[int] = None,
285
- ):
286
- r"""
287
- Encodes the prompt into text encoder hidden states.
288
-
289
- Args:
290
- prompt (`str` or `List[str]`, *optional*):
291
- prompt to be encoded
292
- prompt_2 (`str` or `List[str]`, *optional*):
293
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
294
- used in both text-encoders
295
- device: (`torch.device`):
296
- torch device
297
- num_images_per_prompt (`int`):
298
- number of images that should be generated per prompt
299
- do_classifier_free_guidance (`bool`):
300
- whether to use classifier free guidance or not
301
- negative_prompt (`str` or `List[str]`, *optional*):
302
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
303
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
304
- less than `1`).
305
- negative_prompt_2 (`str` or `List[str]`, *optional*):
306
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
307
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
308
- prompt_embeds (`torch.Tensor`, *optional*):
309
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
310
- provided, text embeddings will be generated from `prompt` input argument.
311
- negative_prompt_embeds (`torch.Tensor`, *optional*):
312
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
313
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
314
- argument.
315
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
316
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
317
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
318
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
319
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
320
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
321
- input argument.
322
- lora_scale (`float`, *optional*):
323
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
324
- clip_skip (`int`, *optional*):
325
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
326
- the output of the pre-final layer will be used for computing the prompt embeddings.
327
- """
328
- device = device or self._execution_device
329
-
330
- # set lora scale so that monkey patched LoRA
331
- # function of text encoder can correctly access it
332
- if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
333
- self._lora_scale = lora_scale
334
-
335
- # dynamically adjust the LoRA scale
336
- if self.text_encoder is not None:
337
- if not USE_PEFT_BACKEND:
338
- adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
339
- else:
340
- scale_lora_layers(self.text_encoder, lora_scale)
341
-
342
- if self.text_encoder_2 is not None:
343
- if not USE_PEFT_BACKEND:
344
- adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
345
- else:
346
- scale_lora_layers(self.text_encoder_2, lora_scale)
347
-
348
- prompt = [prompt] if isinstance(prompt, str) else prompt
349
-
350
- if prompt is not None:
351
- batch_size = len(prompt)
352
- else:
353
- batch_size = prompt_embeds.shape[0]
354
-
355
- # Define tokenizers and text encoders
356
- tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
357
- text_encoders = (
358
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
359
- )
360
-
361
- if prompt_embeds is None:
362
- prompt_2 = prompt_2 or prompt
363
- prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
364
-
365
- # textual inversion: procecss multi-vector tokens if necessary
366
- prompt_embeds_list = []
367
- prompts = [prompt, prompt_2]
368
- for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
369
- if isinstance(self, TextualInversionLoaderMixin):
370
- prompt = self.maybe_convert_prompt(prompt, tokenizer)
371
-
372
- text_inputs = tokenizer(
373
- prompt,
374
- padding="max_length",
375
- max_length=tokenizer.model_max_length,
376
- truncation=True,
377
- return_tensors="pt",
378
- )
379
-
380
- text_input_ids = text_inputs.input_ids
381
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
382
-
383
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
384
- text_input_ids, untruncated_ids
385
- ):
386
- removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
387
- logger.warning(
388
- "The following part of your input was truncated because CLIP can only handle sequences up to"
389
- f" {tokenizer.model_max_length} tokens: {removed_text}"
390
- )
391
-
392
- prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
393
-
394
- # We are only ALWAYS interested in the pooled output of the final text encoder
395
- pooled_prompt_embeds = prompt_embeds[0]
396
- if clip_skip is None:
397
- prompt_embeds = prompt_embeds.hidden_states[-2]
398
- else:
399
- # "2" because SDXL always indexes from the penultimate layer.
400
- prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
401
-
402
- prompt_embeds_list.append(prompt_embeds)
403
-
404
- prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
405
-
406
- # get unconditional embeddings for classifier free guidance
407
- zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
408
- if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
409
- negative_prompt_embeds = torch.zeros_like(prompt_embeds)
410
- negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
411
- elif do_classifier_free_guidance and negative_prompt_embeds is None:
412
- negative_prompt = negative_prompt or ""
413
- negative_prompt_2 = negative_prompt_2 or negative_prompt
414
-
415
- # normalize str to list
416
- negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
417
- negative_prompt_2 = (
418
- batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
419
- )
420
-
421
- uncond_tokens: List[str]
422
- if prompt is not None and type(prompt) is not type(negative_prompt):
423
- raise TypeError(
424
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
425
- f" {type(prompt)}."
426
- )
427
- elif batch_size != len(negative_prompt):
428
- raise ValueError(
429
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
430
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
431
- " the batch size of `prompt`."
432
- )
433
- else:
434
- uncond_tokens = [negative_prompt, negative_prompt_2]
435
-
436
- negative_prompt_embeds_list = []
437
- for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
438
- if isinstance(self, TextualInversionLoaderMixin):
439
- negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
440
-
441
- max_length = prompt_embeds.shape[1]
442
- uncond_input = tokenizer(
443
- negative_prompt,
444
- padding="max_length",
445
- max_length=max_length,
446
- truncation=True,
447
- return_tensors="pt",
448
- )
449
-
450
- negative_prompt_embeds = text_encoder(
451
- uncond_input.input_ids.to(device),
452
- output_hidden_states=True,
453
- )
454
- # We are ALWAYS interested only in the pooled output of the final text encoder
455
- negative_pooled_prompt_embeds = negative_prompt_embeds[0]
456
- negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
457
-
458
- negative_prompt_embeds_list.append(negative_prompt_embeds)
459
-
460
- negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
461
-
462
- if self.text_encoder_2 is not None:
463
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
464
- else:
465
- prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
466
-
467
- bs_embed, seq_len, _ = prompt_embeds.shape
468
- # duplicate text embeddings for each generation per prompt, using mps friendly method
469
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
470
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
471
-
472
- if do_classifier_free_guidance:
473
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
474
- seq_len = negative_prompt_embeds.shape[1]
475
-
476
- if self.text_encoder_2 is not None:
477
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
478
- else:
479
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
480
-
481
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
482
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
483
-
484
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
485
- bs_embed * num_images_per_prompt, -1
486
- )
487
- if do_classifier_free_guidance:
488
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
489
- bs_embed * num_images_per_prompt, -1
490
- )
491
-
492
- if self.text_encoder is not None:
493
- if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
494
- # Retrieve the original scale by scaling back the LoRA layers
495
- unscale_lora_layers(self.text_encoder, lora_scale)
496
-
497
- if self.text_encoder_2 is not None:
498
- if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
499
- # Retrieve the original scale by scaling back the LoRA layers
500
- unscale_lora_layers(self.text_encoder_2, lora_scale)
501
-
502
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
503
-
504
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
505
- def encode_image(self, image, device, num_images_per_prompt):
506
- dtype = next(self.image_encoder.parameters()).dtype
507
-
508
- if not isinstance(image, torch.Tensor):
509
- image = self.feature_extractor(image, return_tensors="pt").pixel_values
510
-
511
- image = image.to(device=device, dtype=dtype)
512
- image_embeds = self.image_encoder(image).image_embeds
513
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
514
-
515
- uncond_image_embeds = torch.zeros_like(image_embeds)
516
- return image_embeds, uncond_image_embeds
517
-
518
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
519
- def prepare_extra_step_kwargs(self, generator, eta):
520
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
521
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
522
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
523
- # and should be between [0, 1]
524
-
525
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
526
- extra_step_kwargs = {}
527
- if accepts_eta:
528
- extra_step_kwargs["eta"] = eta
529
-
530
- # check if the scheduler accepts generator
531
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
532
- if accepts_generator:
533
- extra_step_kwargs["generator"] = generator
534
- return extra_step_kwargs
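A side note on the signature check above: `eta` and `generator` are forwarded only to schedulers whose `step()` actually accepts them. A minimal standalone sketch of the same introspection idiom (the `step_*` functions are hypothetical stand-ins, not diffusers schedulers):

```py
import inspect

def step_ddim(sample, eta=0.0, generator=None):  # accepts eta/generator
    return sample

def step_euler(sample):  # accepts neither
    return sample

def build_step_kwargs(step_fn, eta, generator):
    # Forward only the arguments the step function can take.
    params = set(inspect.signature(step_fn).parameters.keys())
    kwargs = {}
    if "eta" in params:
        kwargs["eta"] = eta
    if "generator" in params:
        kwargs["generator"] = generator
    return kwargs

print(build_step_kwargs(step_ddim, 0.0, None))   # {'eta': 0.0, 'generator': None}
print(build_step_kwargs(step_euler, 0.0, None))  # {}
```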
535
-
536
- def check_inputs(
537
- self,
538
- prompt,
539
- prompt_2,
540
- height,
541
- width,
542
- callback_steps,
543
- negative_prompt=None,
544
- negative_prompt_2=None,
545
- prompt_embeds=None,
546
- negative_prompt_embeds=None,
547
- pooled_prompt_embeds=None,
548
- negative_pooled_prompt_embeds=None,
549
- callback_on_step_end_tensor_inputs=None,
550
- ):
551
- if height % 8 != 0 or width % 8 != 0:
552
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
553
-
554
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
555
- raise ValueError(
556
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
557
- f" {type(callback_steps)}."
558
- )
559
-
560
- if callback_on_step_end_tensor_inputs is not None and not all(
561
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
562
- ):
563
- raise ValueError(
564
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
565
- )
566
-
567
- if prompt is not None and prompt_embeds is not None:
568
- raise ValueError(
569
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
570
- " only forward one of the two."
571
- )
572
- elif prompt_2 is not None and prompt_embeds is not None:
573
- raise ValueError(
574
- f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
575
- " only forward one of the two."
576
- )
577
- elif prompt is None and prompt_embeds is None:
578
- raise ValueError(
579
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
580
- )
581
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
582
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
583
- elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
584
- raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
585
-
586
- if negative_prompt is not None and negative_prompt_embeds is not None:
587
- raise ValueError(
588
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
589
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
590
- )
591
- elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
592
- raise ValueError(
593
- f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
594
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
595
- )
596
-
597
- if prompt_embeds is not None and negative_prompt_embeds is not None:
598
- if prompt_embeds.shape != negative_prompt_embeds.shape:
599
- raise ValueError(
600
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
601
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
602
- f" {negative_prompt_embeds.shape}."
603
- )
604
-
605
- if prompt_embeds is not None and pooled_prompt_embeds is None:
606
- raise ValueError(
607
- "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
608
- )
609
-
610
- if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
611
- raise ValueError(
612
- "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
613
- )
614
-
615
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
616
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
617
- shape = (
618
- batch_size,
619
- num_channels_latents,
620
- int(height) // self.vae_scale_factor,
621
- int(width) // self.vae_scale_factor,
622
- )
623
- if isinstance(generator, list) and len(generator) != batch_size:
624
- raise ValueError(
625
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
626
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
627
- )
628
-
629
- if latents is None:
630
- latents = randn_tensor(shape, generator=generator, device=device, dtype=torch.float32)
631
- else:
632
- latents = latents.to(device)
633
-
634
- # scale the initial noise by the standard deviation required by the scheduler
635
- latents = latents * self.scheduler.init_noise_sigma
636
- return latents
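As a concrete check of the shape computed in `prepare_latents`: assuming the usual SDXL setup of `vae_scale_factor = 8` and `num_channels_latents = 4` (values not shown in this hunk), a single 1024x1024 image corresponds to a latent of shape (1, 4, 128, 128):

```py
import torch

batch_size = 1
num_channels_latents = 4   # assumed SDXL UNet in_channels
height, width = 1024, 1024
vae_scale_factor = 8       # assumed SDXL VAE downsampling factor

shape = (batch_size, num_channels_latents, height // vae_scale_factor, width // vae_scale_factor)
latents = torch.randn(shape)
print(latents.shape)  # torch.Size([1, 4, 128, 128])
```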
637
-
638
- def _get_add_time_ids(
639
- self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
640
- ):
641
- add_time_ids = list(original_size + crops_coords_top_left + target_size)
642
-
643
- passed_add_embed_dim = (
644
- self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
645
- )
646
- expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
647
-
648
- if expected_add_embed_dim != passed_add_embed_dim:
649
- raise ValueError(
650
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
651
- )
652
-
653
- add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
654
- return add_time_ids
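The dimension check in `_get_add_time_ids` can be made concrete. `add_time_ids` is six integers (original size, crop offset, target size), each embedded with `addition_time_embed_dim` features, plus the pooled text projection. Assuming the standard SDXL base configuration (`addition_time_embed_dim = 256`, `projection_dim = 1280`), the expected width is 6 * 256 + 1280 = 2816:

```py
# Assumed standard SDXL base values; other checkpoints may differ.
addition_time_embed_dim = 256
text_encoder_projection_dim = 1280

original_size = (1024, 1024)
crops_coords_top_left = (0, 0)
target_size = (1024, 1024)

add_time_ids = list(original_size + crops_coords_top_left + target_size)  # 6 values
passed_add_embed_dim = addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
print(len(add_time_ids), passed_add_embed_dim)  # 6 2816
```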
655
-
656
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
657
- def upcast_vae(self):
658
- dtype = self.vae.dtype
659
- self.vae.to(dtype=torch.float32)
660
- use_torch_2_0_or_xformers = isinstance(
661
- self.vae.decoder.mid_block.attentions[0].processor,
662
- (
663
- AttnProcessor2_0,
664
- XFormersAttnProcessor,
665
- LoRAXFormersAttnProcessor,
666
- LoRAAttnProcessor2_0,
667
- ),
668
- )
669
- # if xformers or torch_2_0 is used attention block does not need
670
- # to be in float32 which can save lots of memory
671
- if use_torch_2_0_or_xformers:
672
- self.vae.post_quant_conv.to(dtype)
673
- self.vae.decoder.conv_in.to(dtype)
674
- self.vae.decoder.mid_block.to(dtype)
675
-
676
- # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
677
- def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
678
- """
679
- See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
680
-
681
- Args:
682
- w (`torch.Tensor`):
683
- guidance scale values at which to generate embedding vectors
684
- embedding_dim (`int`, *optional*, defaults to 512):
685
- dimension of the embeddings to generate
686
- dtype:
687
- data type of the generated embeddings
688
-
689
- Returns:
690
- `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`
691
- """
692
- assert len(w.shape) == 1
693
- w = w * 1000.0
694
-
695
- half_dim = embedding_dim // 2
696
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
697
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
698
- emb = w.to(dtype)[:, None] * emb[None, :]
699
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
700
- if embedding_dim % 2 == 1: # zero pad
701
- emb = torch.nn.functional.pad(emb, (0, 1))
702
- assert emb.shape == (w.shape[0], embedding_dim)
703
- return emb
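For readers who want to inspect this embedding outside the pipeline, here is a standalone copy of the same computation (only `self` removed), showing that a batch of guidance weights maps to sinusoidal features of the requested width. The input `7.5 - 1.0` mirrors the `guidance_scale - 1` value passed in `__call__`:

```py
import torch

def guidance_scale_embedding(w, embedding_dim=512, dtype=torch.float32):
    # Sinusoidal embedding of the scaled guidance weight, as in the method above.
    assert len(w.shape) == 1
    w = w * 1000.0
    half_dim = embedding_dim // 2
    emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
    emb = w.to(dtype)[:, None] * emb[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad for odd widths
        emb = torch.nn.functional.pad(emb, (0, 1))
    return emb

print(guidance_scale_embedding(torch.tensor([7.5 - 1.0]), embedding_dim=256).shape)  # torch.Size([1, 256])
```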
704
-
705
- @property
706
- def guidance_scale(self):
707
- return self._guidance_scale
708
-
709
- @property
710
- def guidance_rescale(self):
711
- return self._guidance_rescale
712
-
713
- @property
714
- def clip_skip(self):
715
- return self._clip_skip
716
-
717
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
718
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
719
- # corresponds to doing no classifier free guidance.
720
- @property
721
- def do_classifier_free_guidance(self):
722
- return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
723
-
724
- @property
725
- def cross_attention_kwargs(self):
726
- return self._cross_attention_kwargs
727
-
728
- @property
729
- def denoising_end(self):
730
- return self._denoising_end
731
-
732
- @property
733
- def num_timesteps(self):
734
- return self._num_timesteps
735
-
736
- @torch.no_grad()
737
- @replace_example_docstring(EXAMPLE_DOC_STRING)
738
- def __call__(
739
- self,
740
- prompt: Union[str, List[str]] = None,
741
- prompt_2: Optional[Union[str, List[str]]] = None,
742
- height: Optional[int] = None,
743
- width: Optional[int] = None,
744
- num_inference_steps: int = 50,
745
- timesteps: List[int] = None,
746
- denoising_end: Optional[float] = None,
747
- guidance_scale: float = 5.0,
748
- negative_prompt: Optional[Union[str, List[str]]] = None,
749
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
750
- num_images_per_prompt: Optional[int] = 1,
751
- eta: float = 0.0,
752
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
753
- latents: Optional[torch.Tensor] = None,
754
- prompt_embeds: Optional[torch.Tensor] = None,
755
- negative_prompt_embeds: Optional[torch.Tensor] = None,
756
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
757
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
758
- ip_adapter_image: Optional[PipelineImageInput] = None,
759
- output_type: Optional[str] = "pil",
760
- return_dict: bool = True,
761
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
762
- guidance_rescale: float = 0.0,
763
- original_size: Optional[Tuple[int, int]] = None,
764
- crops_coords_top_left: Tuple[int, int] = (0, 0),
765
- target_size: Optional[Tuple[int, int]] = None,
766
- negative_original_size: Optional[Tuple[int, int]] = None,
767
- negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
768
- negative_target_size: Optional[Tuple[int, int]] = None,
769
- clip_skip: Optional[int] = None,
770
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
771
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
772
- **kwargs,
773
- ):
774
- r"""
775
- Function invoked when calling the pipeline for generation.
776
-
777
- Args:
778
- prompt (`str` or `List[str]`, *optional*):
779
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
780
- instead.
781
- prompt_2 (`str` or `List[str]`, *optional*):
782
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
783
- used in both text-encoders
784
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
785
- The height in pixels of the generated image. This is set to 1024 by default for the best results.
786
- Anything below 512 pixels won't work well for
787
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
788
- and checkpoints that are not specifically fine-tuned on low resolutions.
789
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
790
- The width in pixels of the generated image. This is set to 1024 by default for the best results.
791
- Anything below 512 pixels won't work well for
792
- [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
793
- and checkpoints that are not specifically fine-tuned on low resolutions.
794
- num_inference_steps (`int`, *optional*, defaults to 50):
795
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
796
- expense of slower inference.
797
- timesteps (`List[int]`, *optional*):
798
- Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
799
- in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
800
- passed will be used. Must be in descending order.
801
- denoising_end (`float`, *optional*):
802
- When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
803
- completed before it is intentionally prematurely terminated. As a result, the returned sample will
804
- still retain a substantial amount of noise as determined by the discrete timesteps selected by the
805
- scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
806
- "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
807
- Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
808
- guidance_scale (`float`, *optional*, defaults to 5.0):
809
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
810
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
811
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
812
- 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
813
- usually at the expense of lower image quality.
814
- negative_prompt (`str` or `List[str]`, *optional*):
815
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
816
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
817
- less than `1`).
818
- negative_prompt_2 (`str` or `List[str]`, *optional*):
819
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
820
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
821
- num_images_per_prompt (`int`, *optional*, defaults to 1):
822
- The number of images to generate per prompt.
823
- eta (`float`, *optional*, defaults to 0.0):
824
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
825
- [`schedulers.DDIMScheduler`], will be ignored for others.
826
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
827
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
828
- to make generation deterministic.
829
- latents (`torch.Tensor`, *optional*):
830
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
831
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
832
- tensor will be generated by sampling using the supplied random `generator`.
833
- prompt_embeds (`torch.Tensor`, *optional*):
834
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
835
- provided, text embeddings will be generated from `prompt` input argument.
836
- negative_prompt_embeds (`torch.Tensor`, *optional*):
837
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
838
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
839
- argument.
840
- pooled_prompt_embeds (`torch.Tensor`, *optional*):
841
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
842
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
843
- negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
844
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
845
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
846
- input argument.
847
- ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
848
- output_type (`str`, *optional*, defaults to `"pil"`):
849
- The output format of the generated image. Choose between
850
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
851
- return_dict (`bool`, *optional*, defaults to `True`):
852
- Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
853
- of a plain tuple.
854
- cross_attention_kwargs (`dict`, *optional*):
855
- A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
856
- `self.processor` in
857
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
858
- guidance_rescale (`float`, *optional*, defaults to 0.0):
859
- Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
860
- Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
861
- [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
862
- Guidance rescale factor should fix overexposure when using zero terminal SNR.
863
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
864
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
865
- `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
866
- explained in section 2.2 of
867
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
868
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
869
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
870
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
871
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
872
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
873
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
874
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
875
- not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
876
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
877
- negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
878
- To negatively condition the generation process based on a specific image resolution. Part of SDXL's
879
- micro-conditioning as explained in section 2.2 of
880
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
881
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
882
- negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
883
- To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
884
- micro-conditioning as explained in section 2.2 of
885
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
886
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
887
- negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
888
- To negatively condition the generation process based on a target image resolution. It should be the same
889
- as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
890
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
891
- information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
892
- callback_on_step_end (`Callable`, *optional*):
893
- A function that is called at the end of each denoising step during inference. It is called
894
- with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
895
- callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
896
- `callback_on_step_end_tensor_inputs`.
897
- callback_on_step_end_tensor_inputs (`List`, *optional*):
898
- The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
899
- will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
900
- `._callback_tensor_inputs` attribute of your pipeline class.
901
-
902
- Examples:
903
-
904
- Returns:
905
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
906
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
907
- `tuple`. When returning a tuple, the first element is a list with the generated images.
908
- """
909
-
910
- callback = kwargs.pop("callback", None)
911
- callback_steps = kwargs.pop("callback_steps", None)
912
-
913
- if callback is not None:
914
- deprecate(
915
- "callback",
916
- "1.0.0",
917
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
918
- )
919
- if callback_steps is not None:
920
- deprecate(
921
- "callback_steps",
922
- "1.0.0",
923
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
924
- )
925
-
926
- # 0. Default height and width to unet
927
- height = height or self.default_sample_size * self.vae_scale_factor
928
- width = width or self.default_sample_size * self.vae_scale_factor
929
-
930
- original_size = original_size or (height, width)
931
- target_size = target_size or (height, width)
932
-
933
- # 1. Check inputs. Raise error if not correct
934
- self.check_inputs(
935
- prompt,
936
- prompt_2,
937
- height,
938
- width,
939
- callback_steps,
940
- negative_prompt,
941
- negative_prompt_2,
942
- prompt_embeds,
943
- negative_prompt_embeds,
944
- pooled_prompt_embeds,
945
- negative_pooled_prompt_embeds,
946
- callback_on_step_end_tensor_inputs,
947
- )
948
-
949
- self._guidance_scale = guidance_scale
950
- self._guidance_rescale = guidance_rescale
951
- self._clip_skip = clip_skip
952
- self._cross_attention_kwargs = cross_attention_kwargs
953
- self._denoising_end = denoising_end
954
-
955
- # 2. Define call parameters
956
- if prompt is not None and isinstance(prompt, str):
957
- batch_size = 1
958
- elif prompt is not None and isinstance(prompt, list):
959
- batch_size = len(prompt)
960
- else:
961
- batch_size = prompt_embeds.shape[0]
962
-
963
- device = self._execution_device
964
-
965
- # 3. Encode input prompt
966
- lora_scale = (
967
- self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
968
- )
969
-
970
- (
971
- prompt_embeds,
972
- negative_prompt_embeds,
973
- pooled_prompt_embeds,
974
- negative_pooled_prompt_embeds,
975
- ) = self.encode_prompt(
976
- prompt=prompt,
977
- prompt_2=prompt_2,
978
- device=device,
979
- num_images_per_prompt=num_images_per_prompt,
980
- do_classifier_free_guidance=self.do_classifier_free_guidance,
981
- negative_prompt=negative_prompt,
982
- negative_prompt_2=negative_prompt_2,
983
- prompt_embeds=prompt_embeds,
984
- negative_prompt_embeds=negative_prompt_embeds,
985
- pooled_prompt_embeds=pooled_prompt_embeds,
986
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
987
- lora_scale=lora_scale,
988
- clip_skip=self.clip_skip,
989
- )
990
-
991
- # 4. Prepare timesteps
992
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
993
-
994
- # 5. Prepare latent variables
995
- num_channels_latents = self.unet.config.in_channels
996
- latents = self.prepare_latents(
997
- batch_size * num_images_per_prompt,
998
- num_channels_latents,
999
- height,
1000
- width,
1001
- prompt_embeds.dtype,
1002
- device,
1003
- generator,
1004
- latents,
1005
- )
1006
-
1007
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1008
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1009
-
1010
- # 7. Prepare added time ids & embeddings
1011
- add_text_embeds = pooled_prompt_embeds
1012
- if self.text_encoder_2 is None:
1013
- text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1014
- else:
1015
- text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1016
-
1017
- add_time_ids = self._get_add_time_ids(
1018
- original_size,
1019
- crops_coords_top_left,
1020
- target_size,
1021
- dtype=prompt_embeds.dtype,
1022
- text_encoder_projection_dim=text_encoder_projection_dim,
1023
- )
1024
- if negative_original_size is not None and negative_target_size is not None:
1025
- negative_add_time_ids = self._get_add_time_ids(
1026
- negative_original_size,
1027
- negative_crops_coords_top_left,
1028
- negative_target_size,
1029
- dtype=prompt_embeds.dtype,
1030
- text_encoder_projection_dim=text_encoder_projection_dim,
1031
- )
1032
- else:
1033
- negative_add_time_ids = add_time_ids
1034
-
1035
- if self.do_classifier_free_guidance:
1036
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1037
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1038
- add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
1039
-
1040
- prompt_embeds = prompt_embeds.to(device)
1041
- add_text_embeds = add_text_embeds.to(device)
1042
- add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1043
-
1044
- if ip_adapter_image is not None:
1045
- image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
1046
- if self.do_classifier_free_guidance:
1047
- image_embeds = torch.cat([negative_image_embeds, image_embeds])
1048
- image_embeds = image_embeds.to(device)
1049
-
1050
- # 8. Denoising loop
1051
- num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1052
-
1053
- # 8.1 Apply denoising_end
1054
- if (
1055
- self.denoising_end is not None
1056
- and isinstance(self.denoising_end, float)
1057
- and self.denoising_end > 0
1058
- and self.denoising_end < 1
1059
- ):
1060
- discrete_timestep_cutoff = int(
1061
- round(
1062
- self.scheduler.config.num_train_timesteps
1063
- - (self.denoising_end * self.scheduler.config.num_train_timesteps)
1064
- )
1065
- )
1066
- num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1067
- timesteps = timesteps[:num_inference_steps]
1068
-
1069
- # 9. Optionally get Guidance Scale Embedding
1070
- timestep_cond = None
1071
- if self.unet.config.time_cond_proj_dim is not None:
1072
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1073
- timestep_cond = self.get_guidance_scale_embedding(
1074
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1075
- ).to(device=device, dtype=latents.dtype)
1076
-
1077
- self._num_timesteps = len(timesteps)
1078
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1079
- for i, t in enumerate(timesteps):
1080
- # expand the latents if we are doing classifier free guidance
1081
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1082
-
1083
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1084
-
1085
- # predict the noise residual
1086
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1087
- if ip_adapter_image is not None:
1088
- added_cond_kwargs["image_embeds"] = image_embeds
1089
-
1090
- # noise_pred = self.unet(
1091
- # latent_model_input,
1092
- # t,
1093
- # encoder_hidden_states=prompt_embeds,
1094
- # timestep_cond=timestep_cond,
1095
- # cross_attention_kwargs=self.cross_attention_kwargs,
1096
- # added_cond_kwargs=added_cond_kwargs,
1097
- # return_dict=False,
1098
- # )[0]
1099
-
1100
- noise_pred = self.unet(
1101
- latent_model_input,
1102
- t,
1103
- encoder_hidden_states=prompt_embeds,
1104
- added_cond_kwargs=added_cond_kwargs,
1105
- )["sample"]
1106
-
1107
- # perform guidance
1108
- if self.do_classifier_free_guidance:
1109
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1110
- noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1111
-
1112
- if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
1113
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1114
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
1115
-
1116
- # compute the previous noisy sample x_t -> x_t-1
1117
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1118
-
1119
- if callback_on_step_end is not None:
1120
- callback_kwargs = {}
1121
- for k in callback_on_step_end_tensor_inputs:
1122
- callback_kwargs[k] = locals()[k]
1123
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1124
-
1125
- latents = callback_outputs.pop("latents", latents)
1126
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1127
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1128
- add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
1129
- negative_pooled_prompt_embeds = callback_outputs.pop(
1130
- "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
1131
- )
1132
- add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
1133
- negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
1134
-
1135
- # call the callback, if provided
1136
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1137
- progress_bar.update()
1138
- if callback is not None and i % callback_steps == 0:
1139
- step_idx = i // getattr(self.scheduler, "order", 1)
1140
- callback(step_idx, t, latents)
1141
-
1142
- if XLA_AVAILABLE:
1143
- xm.mark_step()
1144
-
1145
- if not output_type == "latent":
1146
- # make sure the VAE is in float32 mode, as it overflows in float16
1147
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1148
-
1149
- if needs_upcasting:
1150
- self.upcast_vae()
1151
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1152
-
1153
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1154
-
1155
- # cast back to fp16 if needed
1156
- if needs_upcasting:
1157
- self.vae.to(dtype=torch.float16)
1158
- else:
1159
- image = latents
1160
-
1161
- if not output_type == "latent":
1162
- # apply watermark if available
1163
- if self.watermark is not None:
1164
- image = self.watermark.apply_watermark(image)
1165
-
1166
- image = self.image_processor.postprocess(image, output_type=output_type)
1167
-
1168
- # Offload all models
1169
- self.maybe_free_model_hooks()
1170
-
1171
- if not return_dict:
1172
- return (image,)
1173
-
1174
- return StableDiffusionXLPipelineOutput(images=image)
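One detail of the denoising loop above that is easy to misread is the `denoising_end` cutoff: the fraction is converted to a discrete training timestep and only timesteps at or above that cutoff are kept. A quick sketch of the arithmetic, assuming the common default of 1000 training timesteps and a hypothetical evenly spaced 50-step schedule:

```py
num_train_timesteps = 1000   # assumed scheduler default
denoising_end = 0.8          # stop after 80% of the denoising schedule

discrete_timestep_cutoff = int(round(num_train_timesteps - denoising_end * num_train_timesteps))
print(discrete_timestep_cutoff)   # 200

timesteps = list(range(999, -1, -20))  # hypothetical 50-step schedule
kept = [t for t in timesteps if t >= discrete_timestep_cutoff]
print(len(timesteps), len(kept))  # 50 40 -> roughly the first 80% of steps are run
```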
1175
-
1176
- @torch.no_grad()
1177
- def prepare_for_ipex(
1178
- self,
1179
- dtype=torch.float32,
1180
- prompt: Union[str, List[str]] = None,
1181
- prompt_2: Optional[Union[str, List[str]]] = None,
1182
- height: Optional[int] = None,
1183
- width: Optional[int] = None,
1184
- num_inference_steps: int = 50,
1185
- timesteps: List[int] = None,
1186
- denoising_end: Optional[float] = None,
1187
- guidance_scale: float = 5.0,
1188
- negative_prompt: Optional[Union[str, List[str]]] = None,
1189
- negative_prompt_2: Optional[Union[str, List[str]]] = None,
1190
- num_images_per_prompt: Optional[int] = 1,
1191
- eta: float = 0.0,
1192
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1193
- latents: Optional[torch.Tensor] = None,
1194
- prompt_embeds: Optional[torch.Tensor] = None,
1195
- negative_prompt_embeds: Optional[torch.Tensor] = None,
1196
- pooled_prompt_embeds: Optional[torch.Tensor] = None,
1197
- negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
1198
- ip_adapter_image: Optional[PipelineImageInput] = None,
1199
- output_type: Optional[str] = "pil",
1200
- return_dict: bool = True,
1201
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1202
- guidance_rescale: float = 0.0,
1203
- original_size: Optional[Tuple[int, int]] = None,
1204
- crops_coords_top_left: Tuple[int, int] = (0, 0),
1205
- target_size: Optional[Tuple[int, int]] = None,
1206
- negative_original_size: Optional[Tuple[int, int]] = None,
1207
- negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
1208
- negative_target_size: Optional[Tuple[int, int]] = None,
1209
- clip_skip: Optional[int] = None,
1210
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
1211
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1212
- **kwargs,
1213
- ):
1214
- callback = kwargs.pop("callback", None)
1215
- callback_steps = kwargs.pop("callback_steps", None)
1216
-
1217
- if callback is not None:
1218
- deprecate(
1219
- "callback",
1220
- "1.0.0",
1221
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1222
- )
1223
- if callback_steps is not None:
1224
- deprecate(
1225
- "callback_steps",
1226
- "1.0.0",
1227
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1228
- )
1229
-
1230
- # 0. Default height and width to unet
1231
- height = height or self.default_sample_size * self.vae_scale_factor
1232
- width = width or self.default_sample_size * self.vae_scale_factor
1233
-
1234
- original_size = original_size or (height, width)
1235
- target_size = target_size or (height, width)
1236
-
1237
- # 1. Check inputs. Raise error if not correct
1238
- self.check_inputs(
1239
- prompt,
1240
- prompt_2,
1241
- height,
1242
- width,
1243
- callback_steps,
1244
- negative_prompt,
1245
- negative_prompt_2,
1246
- prompt_embeds,
1247
- negative_prompt_embeds,
1248
- pooled_prompt_embeds,
1249
- negative_pooled_prompt_embeds,
1250
- callback_on_step_end_tensor_inputs,
1251
- )
1252
-
1253
- self._guidance_scale = guidance_scale
1254
- self._guidance_rescale = guidance_rescale
1255
- self._clip_skip = clip_skip
1256
- self._cross_attention_kwargs = cross_attention_kwargs
1257
- self._denoising_end = denoising_end
1258
-
1259
- # 2. Define call parameters
1260
- if prompt is not None and isinstance(prompt, str):
1261
- batch_size = 1
1262
- elif prompt is not None and isinstance(prompt, list):
1263
- batch_size = len(prompt)
1264
- else:
1265
- batch_size = prompt_embeds.shape[0]
1266
-
1267
- device = "cpu"
1268
- do_classifier_free_guidance = self.do_classifier_free_guidance
1269
-
1270
- # 3. Encode input prompt
1271
- lora_scale = (
1272
- self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1273
- )
1274
-
1275
- (
1276
- prompt_embeds,
1277
- negative_prompt_embeds,
1278
- pooled_prompt_embeds,
1279
- negative_pooled_prompt_embeds,
1280
- ) = self.encode_prompt(
1281
- prompt=prompt,
1282
- prompt_2=prompt_2,
1283
- device=device,
1284
- num_images_per_prompt=num_images_per_prompt,
1285
- do_classifier_free_guidance=self.do_classifier_free_guidance,
1286
- negative_prompt=negative_prompt,
1287
- negative_prompt_2=negative_prompt_2,
1288
- prompt_embeds=prompt_embeds,
1289
- negative_prompt_embeds=negative_prompt_embeds,
1290
- pooled_prompt_embeds=pooled_prompt_embeds,
1291
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1292
- lora_scale=lora_scale,
1293
- clip_skip=self.clip_skip,
1294
- )
1295
-
1296
- # 5. Prepare latent variables
1297
- num_channels_latents = self.unet.config.in_channels
1298
- latents = self.prepare_latents(
1299
- batch_size * num_images_per_prompt,
1300
- num_channels_latents,
1301
- height,
1302
- width,
1303
- prompt_embeds.dtype,
1304
- device,
1305
- generator,
1306
- latents,
1307
- )
1308
-
1309
- # 7. Prepare added time ids & embeddings
1310
- add_text_embeds = pooled_prompt_embeds
1311
- if self.text_encoder_2 is None:
1312
- text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1313
- else:
1314
- text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1315
-
1316
- add_time_ids = self._get_add_time_ids(
1317
- original_size,
1318
- crops_coords_top_left,
1319
- target_size,
1320
- dtype=prompt_embeds.dtype,
1321
- text_encoder_projection_dim=text_encoder_projection_dim,
1322
- )
1323
- if negative_original_size is not None and negative_target_size is not None:
1324
- negative_add_time_ids = self._get_add_time_ids(
1325
- negative_original_size,
1326
- negative_crops_coords_top_left,
1327
- negative_target_size,
1328
- dtype=prompt_embeds.dtype,
1329
- text_encoder_projection_dim=text_encoder_projection_dim,
1330
- )
1331
- else:
1332
- negative_add_time_ids = add_time_ids
1333
-
1334
- if self.do_classifier_free_guidance:
1335
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1336
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1337
- add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
1338
-
1339
- prompt_embeds = prompt_embeds.to(device)
1340
- add_text_embeds = add_text_embeds.to(device)
1341
- add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1342
-
1343
- if ip_adapter_image is not None:
1344
- image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
1345
- if self.do_classifier_free_guidance:
1346
- image_embeds = torch.cat([negative_image_embeds, image_embeds])
1347
- image_embeds = image_embeds.to(device)
1348
-
1349
- dummy = torch.ones(1, dtype=torch.int32)
1350
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1351
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, dummy)
1352
-
1353
- # predict the noise residual
1354
- added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1355
- if ip_adapter_image is not None:
1356
- added_cond_kwargs["image_embeds"] = image_embeds
1357
-
1358
- if not output_type == "latent":
1359
- # make sure the VAE is in float32 mode, as it overflows in float16
1360
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1361
-
1362
- if needs_upcasting:
1363
- self.upcast_vae()
1364
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1365
-
1366
- # cast back to fp16 if needed
1367
- if needs_upcasting:
1368
- self.vae.to(dtype=torch.float16)
1369
-
1370
- self.unet = self.unet.to(memory_format=torch.channels_last)
1371
- self.vae.decoder = self.vae.decoder.to(memory_format=torch.channels_last)
1372
- self.text_encoder = self.text_encoder.to(memory_format=torch.channels_last)
1373
-
1374
- unet_input_example = {
1375
- "sample": latent_model_input,
1376
- "timestep": dummy,
1377
- "encoder_hidden_states": prompt_embeds,
1378
- "added_cond_kwargs": added_cond_kwargs,
1379
- }
1380
-
1381
- vae_decoder_input_example = latents
1382
-
1383
- # optimize with ipex
1384
- if dtype == torch.bfloat16:
1385
- self.unet = ipex.optimize(
1386
- self.unet.eval(),
1387
- dtype=torch.bfloat16,
1388
- inplace=True,
1389
- )
1390
- self.vae.decoder = ipex.optimize(self.vae.decoder.eval(), dtype=torch.bfloat16, inplace=True)
1391
- self.text_encoder = ipex.optimize(self.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
1392
- elif dtype == torch.float32:
1393
- self.unet = ipex.optimize(
1394
- self.unet.eval(),
1395
- dtype=torch.float32,
1396
- inplace=True,
1397
- level="O1",
1398
- weights_prepack=True,
1399
- auto_kernel_selection=False,
1400
- )
1401
- self.vae.decoder = ipex.optimize(
1402
- self.vae.decoder.eval(),
1403
- dtype=torch.float32,
1404
- inplace=True,
1405
- level="O1",
1406
- weights_prepack=True,
1407
- auto_kernel_selection=False,
1408
- )
1409
- self.text_encoder = ipex.optimize(
1410
- self.text_encoder.eval(),
1411
- dtype=torch.float32,
1412
- inplace=True,
1413
- level="O1",
1414
- weights_prepack=True,
1415
- auto_kernel_selection=False,
1416
- )
1417
- else:
1418
- raise ValueError("The value of 'dtype' should be 'torch.bfloat16' or 'torch.float32'!")
1419
-
1420
- # trace unet model to get better performance on IPEX
1421
- with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
1422
- unet_trace_model = torch.jit.trace(
1423
- self.unet, example_kwarg_inputs=unet_input_example, check_trace=False, strict=False
1424
- )
1425
- unet_trace_model = torch.jit.freeze(unet_trace_model)
1426
- self.unet.forward = unet_trace_model.forward
1427
-
1428
- # trace vae.decoder model to get better performance on IPEX
1429
- with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
1430
- vae_decoder_trace_model = torch.jit.trace(
1431
- self.vae.decoder, vae_decoder_input_example, check_trace=False, strict=False
1432
- )
1433
- vae_decoder_trace_model = torch.jit.freeze(vae_decoder_trace_model)
1434
- self.vae.decoder.forward = vae_decoder_trace_model.forward
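Since this commit removes the whole SDXL IPEX community pipeline (pipeline_stable_diffusion_xl_ipex.py), a rough usage sketch may help readers who relied on it. The loading path is an assumption here (community pipelines are normally loaded by file name via `custom_pipeline`), `intel_extension_for_pytorch` must be installed, and `prepare_for_ipex` has to be called with the same resolution that is later passed to the pipeline:

```py
import torch
from diffusers import DiffusionPipeline

# Assumed entry point for the removed community file; runs on CPU with IPEX.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="pipeline_stable_diffusion_xl_ipex",
)

prompt = "a photo of an astronaut riding a horse on mars"

# Trace/optimize the UNet, VAE decoder and text encoder for the target resolution first ...
pipe.prepare_for_ipex(torch.bfloat16, prompt=prompt, height=1024, width=1024)

# ... then run inference under bf16 autocast with the traced modules.
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, height=1024, width=1024, num_inference_steps=20).images[0]
image.save("astronaut.png")
```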
pipeline_zero1to3.py DELETED
@@ -1,793 +0,0 @@
1
- # A diffuser version implementation of Zero1to3 (https://github.com/cvlab-columbia/zero123), ICCV 2023
2
- # by Xin Kong
3
-
4
- import inspect
5
- from typing import Any, Callable, Dict, List, Optional, Union
6
-
7
- import kornia
8
- import numpy as np
9
- import PIL.Image
10
- import torch
11
- from packaging import version
12
- from transformers import CLIPFeatureExtractor, CLIPVisionModelWithProjection
13
-
14
- # from ...configuration_utils import FrozenDict
15
- # from ...models import AutoencoderKL, UNet2DConditionModel
16
- # from ...schedulers import KarrasDiffusionSchedulers
17
- # from ...utils import (
18
- # deprecate,
19
- # is_accelerate_available,
20
- # is_accelerate_version,
21
- # logging,
22
- # randn_tensor,
23
- # replace_example_docstring,
24
- # )
25
- # from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
26
- # from . import StableDiffusionPipelineOutput
27
- # from .safety_checker import StableDiffusionSafetyChecker
28
- from diffusers import AutoencoderKL, DiffusionPipeline, StableDiffusionMixin, UNet2DConditionModel
29
- from diffusers.configuration_utils import ConfigMixin, FrozenDict
30
- from diffusers.models.modeling_utils import ModelMixin
31
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
32
- from diffusers.schedulers import KarrasDiffusionSchedulers
33
- from diffusers.utils import (
34
- deprecate,
35
- logging,
36
- replace_example_docstring,
37
- )
38
- from diffusers.utils.torch_utils import randn_tensor
39
-
40
-
41
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
42
- # todo
43
- EXAMPLE_DOC_STRING = """
44
- Examples:
45
- ```py
46
- >>> import torch
47
- >>> from diffusers import StableDiffusionPipeline
48
-
49
- >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
50
- >>> pipe = pipe.to("cuda")
51
-
52
- >>> prompt = "a photo of an astronaut riding a horse on mars"
53
- >>> image = pipe(prompt).images[0]
54
- ```
55
- """
56
-
57
-
58
- class CCProjection(ModelMixin, ConfigMixin):
59
- def __init__(self, in_channel=772, out_channel=768):
60
- super().__init__()
61
- self.in_channel = in_channel
62
- self.out_channel = out_channel
63
- self.projection = torch.nn.Linear(in_channel, out_channel)
64
-
65
- def forward(self, x):
66
- return self.projection(x)
67
-
68
-
69
- class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
70
- r"""
71
- Pipeline for single view conditioned novel view generation using Zero1to3.
72
-
73
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
74
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
75
-
76
- Args:
77
- vae ([`AutoencoderKL`]):
78
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
79
- image_encoder ([`CLIPVisionModelWithProjection`]):
80
- Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of
81
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection),
82
- specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
83
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
84
- scheduler ([`SchedulerMixin`]):
85
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
86
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
87
- safety_checker ([`StableDiffusionSafetyChecker`]):
88
- Classification module that estimates whether generated images could be considered offensive or harmful.
89
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
90
- feature_extractor ([`CLIPFeatureExtractor`]):
91
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
92
- cc_projection ([`CCProjection`]):
93
- Projection layer to project the concatenated CLIP features and pose embeddings to the original CLIP feature size.
94
- """
95
-
96
- _optional_components = ["safety_checker", "feature_extractor"]
97
-
98
- def __init__(
99
- self,
100
- vae: AutoencoderKL,
101
- image_encoder: CLIPVisionModelWithProjection,
102
- unet: UNet2DConditionModel,
103
- scheduler: KarrasDiffusionSchedulers,
104
- safety_checker: StableDiffusionSafetyChecker,
105
- feature_extractor: CLIPFeatureExtractor,
106
- cc_projection: CCProjection,
107
- requires_safety_checker: bool = True,
108
- ):
109
- super().__init__()
110
-
111
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
112
- deprecation_message = (
113
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
114
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
115
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
116
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
117
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
118
- " file"
119
- )
120
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
121
- new_config = dict(scheduler.config)
122
- new_config["steps_offset"] = 1
123
- scheduler._internal_dict = FrozenDict(new_config)
124
-
125
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
126
- deprecation_message = (
127
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
128
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
129
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
130
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
131
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
132
- )
133
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
134
- new_config = dict(scheduler.config)
135
- new_config["clip_sample"] = False
136
- scheduler._internal_dict = FrozenDict(new_config)
137
-
138
- if safety_checker is None and requires_safety_checker:
139
- logger.warning(
140
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
141
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
142
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
143
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
144
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
145
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
146
- )
147
-
148
- if safety_checker is not None and feature_extractor is None:
149
- raise ValueError(
150
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
151
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
152
- )
153
-
154
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
155
- version.parse(unet.config._diffusers_version).base_version
156
- ) < version.parse("0.9.0.dev0")
157
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
158
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
159
- deprecation_message = (
160
- "The configuration file of the unet has set the default `sample_size` to smaller than"
161
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
162
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
163
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
164
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
165
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
166
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
167
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
168
- " the `unet/config.json` file"
169
- )
170
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
171
- new_config = dict(unet.config)
172
- new_config["sample_size"] = 64
173
- unet._internal_dict = FrozenDict(new_config)
174
-
175
- self.register_modules(
176
- vae=vae,
177
- image_encoder=image_encoder,
178
- unet=unet,
179
- scheduler=scheduler,
180
- safety_checker=safety_checker,
181
- feature_extractor=feature_extractor,
182
- cc_projection=cc_projection,
183
- )
184
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
185
- self.register_to_config(requires_safety_checker=requires_safety_checker)
186
- # self.model_mode = None
187
-
188
- def _encode_prompt(
189
- self,
190
- prompt,
191
- device,
192
- num_images_per_prompt,
193
- do_classifier_free_guidance,
194
- negative_prompt=None,
195
- prompt_embeds: Optional[torch.Tensor] = None,
196
- negative_prompt_embeds: Optional[torch.Tensor] = None,
197
- ):
198
- r"""
199
- Encodes the prompt into text encoder hidden states.
200
-
201
- Args:
202
- prompt (`str` or `List[str]`, *optional*):
203
- prompt to be encoded
204
- device: (`torch.device`):
205
- torch device
206
- num_images_per_prompt (`int`):
207
- number of images that should be generated per prompt
208
- do_classifier_free_guidance (`bool`):
209
- whether to use classifier free guidance or not
210
- negative_prompt (`str` or `List[str]`, *optional*):
211
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
212
- `negative_prompt_embeds` instead.
213
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
214
- prompt_embeds (`torch.Tensor`, *optional*):
215
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
216
- provided, text embeddings will be generated from `prompt` input argument.
217
- negative_prompt_embeds (`torch.Tensor`, *optional*):
218
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
219
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
220
- argument.
221
- """
222
- if prompt is not None and isinstance(prompt, str):
223
- batch_size = 1
224
- elif prompt is not None and isinstance(prompt, list):
225
- batch_size = len(prompt)
226
- else:
227
- batch_size = prompt_embeds.shape[0]
228
-
229
- if prompt_embeds is None:
230
- text_inputs = self.tokenizer(
231
- prompt,
232
- padding="max_length",
233
- max_length=self.tokenizer.model_max_length,
234
- truncation=True,
235
- return_tensors="pt",
236
- )
237
- text_input_ids = text_inputs.input_ids
238
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
239
-
240
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
241
- text_input_ids, untruncated_ids
242
- ):
243
- removed_text = self.tokenizer.batch_decode(
244
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
245
- )
246
- logger.warning(
247
- "The following part of your input was truncated because CLIP can only handle sequences up to"
248
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
249
- )
250
-
251
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
252
- attention_mask = text_inputs.attention_mask.to(device)
253
- else:
254
- attention_mask = None
255
-
256
- prompt_embeds = self.text_encoder(
257
- text_input_ids.to(device),
258
- attention_mask=attention_mask,
259
- )
260
- prompt_embeds = prompt_embeds[0]
261
-
262
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
263
-
264
- bs_embed, seq_len, _ = prompt_embeds.shape
265
- # duplicate text embeddings for each generation per prompt, using mps friendly method
266
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
267
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
268
-
269
- # get unconditional embeddings for classifier free guidance
270
- if do_classifier_free_guidance and negative_prompt_embeds is None:
271
- uncond_tokens: List[str]
272
- if negative_prompt is None:
273
- uncond_tokens = [""] * batch_size
274
- elif type(prompt) is not type(negative_prompt):
275
- raise TypeError(
276
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
277
- f" {type(prompt)}."
278
- )
279
- elif isinstance(negative_prompt, str):
280
- uncond_tokens = [negative_prompt]
281
- elif batch_size != len(negative_prompt):
282
- raise ValueError(
283
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
284
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
285
- " the batch size of `prompt`."
286
- )
287
- else:
288
- uncond_tokens = negative_prompt
289
-
290
- max_length = prompt_embeds.shape[1]
291
- uncond_input = self.tokenizer(
292
- uncond_tokens,
293
- padding="max_length",
294
- max_length=max_length,
295
- truncation=True,
296
- return_tensors="pt",
297
- )
298
-
299
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
300
- attention_mask = uncond_input.attention_mask.to(device)
301
- else:
302
- attention_mask = None
303
-
304
- negative_prompt_embeds = self.text_encoder(
305
- uncond_input.input_ids.to(device),
306
- attention_mask=attention_mask,
307
- )
308
- negative_prompt_embeds = negative_prompt_embeds[0]
309
-
310
- if do_classifier_free_guidance:
311
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
312
- seq_len = negative_prompt_embeds.shape[1]
313
-
314
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
315
-
316
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
317
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
318
-
319
- # For classifier free guidance, we need to do two forward passes.
320
- # Here we concatenate the unconditional and text embeddings into a single batch
321
- # to avoid doing two forward passes
322
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
323
-
324
- return prompt_embeds
325
-
326
- def CLIP_preprocess(self, x):
327
- dtype = x.dtype
328
- # following openai's implementation
329
- # TODO HF OpenAI CLIP preprocessing issue https://github.com/huggingface/transformers/issues/22505#issuecomment-1650170741
330
- # follow OpenAI's preprocessing exactly: the input tensor must be in [-1, 1], otherwise the preprocessing differs, see https://github.com/huggingface/transformers/pull/22608
331
- if isinstance(x, torch.Tensor):
332
- if x.min() < -1.0 or x.max() > 1.0:
333
- raise ValueError("Expected input tensor to have values in the range [-1, 1]")
334
- x = kornia.geometry.resize(
335
- x.to(torch.float32), (224, 224), interpolation="bicubic", align_corners=True, antialias=False
336
- ).to(dtype=dtype)
337
- x = (x + 1.0) / 2.0
338
- # renormalize according to clip
339
- x = kornia.enhance.normalize(
340
- x, torch.Tensor([0.48145466, 0.4578275, 0.40821073]), torch.Tensor([0.26862954, 0.26130258, 0.27577711])
341
- )
342
- return x
343
-
344
- # from image_variation
345
- def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
346
- dtype = next(self.image_encoder.parameters()).dtype
347
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
348
- raise ValueError(
349
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
350
- )
351
-
352
- if isinstance(image, torch.Tensor):
353
- # Batch single image
354
- if image.ndim == 3:
355
- assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
356
- image = image.unsqueeze(0)
357
-
358
- assert image.ndim == 4, "Image must have 4 dimensions"
359
-
360
- # Check image is in [-1, 1]
361
- if image.min() < -1 or image.max() > 1:
362
- raise ValueError("Image should be in [-1, 1] range")
363
- else:
364
- # preprocess image
365
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
366
- image = [image]
367
-
368
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
369
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
370
- image = np.concatenate(image, axis=0)
371
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
372
- image = np.concatenate([i[None, :] for i in image], axis=0)
373
-
374
- image = image.transpose(0, 3, 1, 2)
375
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
376
-
377
- image = image.to(device=device, dtype=dtype)
378
-
379
- image = self.CLIP_preprocess(image)
380
- # if not isinstance(image, torch.Tensor):
381
- # # 0-255
382
- # print("Warning: image is processed by hf's preprocess, which is different from openai original's.")
383
- # image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
384
- image_embeddings = self.image_encoder(image).image_embeds.to(dtype=dtype)
385
- image_embeddings = image_embeddings.unsqueeze(1)
386
-
387
- # duplicate image embeddings for each generation per prompt, using mps friendly method
388
- bs_embed, seq_len, _ = image_embeddings.shape
389
- image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
390
- image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
391
-
392
- if do_classifier_free_guidance:
393
- negative_prompt_embeds = torch.zeros_like(image_embeddings)
394
-
395
- # For classifier free guidance, we need to do two forward passes.
396
- # Here we concatenate the unconditional and text embeddings into a single batch
397
- # to avoid doing two forward passes
398
- image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])
399
-
400
- return image_embeddings
401
-
402
- def _encode_pose(self, pose, device, num_images_per_prompt, do_classifier_free_guidance):
403
- dtype = next(self.cc_projection.parameters()).dtype
404
- if isinstance(pose, torch.Tensor):
405
- pose_embeddings = pose.unsqueeze(1).to(device=device, dtype=dtype)
406
- else:
407
- if isinstance(pose[0], list):
408
- pose = torch.Tensor(pose)
409
- else:
410
- pose = torch.Tensor([pose])
411
- x, y, z = pose[:, 0].unsqueeze(1), pose[:, 1].unsqueeze(1), pose[:, 2].unsqueeze(1)
412
- pose_embeddings = (
413
- torch.cat([torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z], dim=-1)
414
- .unsqueeze(1)
415
- .to(device=device, dtype=dtype)
416
- ) # B, 1, 4
417
- # duplicate pose embeddings for each generation per prompt, using mps friendly method
418
- bs_embed, seq_len, _ = pose_embeddings.shape
419
- pose_embeddings = pose_embeddings.repeat(1, num_images_per_prompt, 1)
420
- pose_embeddings = pose_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
421
- if do_classifier_free_guidance:
422
- negative_prompt_embeds = torch.zeros_like(pose_embeddings)
423
-
424
- # For classifier free guidance, we need to do two forward passes.
425
- # Here we concatenate the unconditional and text embeddings into a single batch
426
- # to avoid doing two forward passes
427
- pose_embeddings = torch.cat([negative_prompt_embeds, pose_embeddings])
428
- return pose_embeddings
429
-
430
- def _encode_image_with_pose(self, image, pose, device, num_images_per_prompt, do_classifier_free_guidance):
431
- img_prompt_embeds = self._encode_image(image, device, num_images_per_prompt, False)
432
- pose_prompt_embeds = self._encode_pose(pose, device, num_images_per_prompt, False)
433
- prompt_embeds = torch.cat([img_prompt_embeds, pose_prompt_embeds], dim=-1)
434
- prompt_embeds = self.cc_projection(prompt_embeds)
435
- # prompt_embeds = img_prompt_embeds
436
- # following zero123, add the negative prompt after projection
437
- if do_classifier_free_guidance:
438
- negative_prompt = torch.zeros_like(prompt_embeds)
439
- prompt_embeds = torch.cat([negative_prompt, prompt_embeds])
440
- return prompt_embeds
441
-
442
- def run_safety_checker(self, image, device, dtype):
443
- if self.safety_checker is not None:
444
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
445
- image, has_nsfw_concept = self.safety_checker(
446
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
447
- )
448
- else:
449
- has_nsfw_concept = None
450
- return image, has_nsfw_concept
451
-
452
- def decode_latents(self, latents):
453
- latents = 1 / self.vae.config.scaling_factor * latents
454
- image = self.vae.decode(latents).sample
455
- image = (image / 2 + 0.5).clamp(0, 1)
456
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
457
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
458
- return image
459
-
460
- def prepare_extra_step_kwargs(self, generator, eta):
461
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
462
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
463
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
464
- # and should be between [0, 1]
465
-
466
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
467
- extra_step_kwargs = {}
468
- if accepts_eta:
469
- extra_step_kwargs["eta"] = eta
470
-
471
- # check if the scheduler accepts generator
472
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
473
- if accepts_generator:
474
- extra_step_kwargs["generator"] = generator
475
- return extra_step_kwargs
476
-
477
- def check_inputs(self, image, height, width, callback_steps):
478
- if (
479
- not isinstance(image, torch.Tensor)
480
- and not isinstance(image, PIL.Image.Image)
481
- and not isinstance(image, list)
482
- ):
483
- raise ValueError(
484
- "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
485
- f" {type(image)}"
486
- )
487
-
488
- if height % 8 != 0 or width % 8 != 0:
489
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
490
-
491
- if (callback_steps is None) or (
492
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
493
- ):
494
- raise ValueError(
495
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
496
- f" {type(callback_steps)}."
497
- )
498
-
499
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
500
- shape = (
501
- batch_size,
502
- num_channels_latents,
503
- int(height) // self.vae_scale_factor,
504
- int(width) // self.vae_scale_factor,
505
- )
506
- if isinstance(generator, list) and len(generator) != batch_size:
507
- raise ValueError(
508
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
509
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
510
- )
511
-
512
- if latents is None:
513
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
514
- else:
515
- latents = latents.to(device)
516
-
517
- # scale the initial noise by the standard deviation required by the scheduler
518
- latents = latents * self.scheduler.init_noise_sigma
519
- return latents
520
-
521
- def prepare_img_latents(self, image, batch_size, dtype, device, generator=None, do_classifier_free_guidance=False):
522
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
523
- raise ValueError(
524
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
525
- )
526
-
527
- if isinstance(image, torch.Tensor):
528
- # Batch single image
529
- if image.ndim == 3:
530
- assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
531
- image = image.unsqueeze(0)
532
-
533
- assert image.ndim == 4, "Image must have 4 dimensions"
534
-
535
- # Check image is in [-1, 1]
536
- if image.min() < -1 or image.max() > 1:
537
- raise ValueError("Image should be in [-1, 1] range")
538
- else:
539
- # preprocess image
540
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
541
- image = [image]
542
-
543
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
544
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
545
- image = np.concatenate(image, axis=0)
546
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
547
- image = np.concatenate([i[None, :] for i in image], axis=0)
548
-
549
- image = image.transpose(0, 3, 1, 2)
550
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
551
-
552
- image = image.to(device=device, dtype=dtype)
553
-
554
- if isinstance(generator, list) and len(generator) != batch_size:
555
- raise ValueError(
556
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
557
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
558
- )
559
-
560
- if isinstance(generator, list):
561
- init_latents = [
562
- self.vae.encode(image[i : i + 1]).latent_dist.mode()
563
- for i in range(batch_size) # sample
564
- ]
565
- init_latents = torch.cat(init_latents, dim=0)
566
- else:
567
- init_latents = self.vae.encode(image).latent_dist.mode()
568
-
569
- # init_latents = self.vae.config.scaling_factor * init_latents # todo in original zero123's inference gradio_new.py, model.encode_first_stage() is not scaled by scaling_factor
570
- if batch_size > init_latents.shape[0]:
571
- # init_latents = init_latents.repeat(batch_size // init_latents.shape[0], 1, 1, 1)
572
- num_images_per_prompt = batch_size // init_latents.shape[0]
573
- # duplicate image latents for each generation per prompt, using mps friendly method
574
- bs_embed, emb_c, emb_h, emb_w = init_latents.shape
575
- init_latents = init_latents.unsqueeze(1)
576
- init_latents = init_latents.repeat(1, num_images_per_prompt, 1, 1, 1)
577
- init_latents = init_latents.view(bs_embed * num_images_per_prompt, emb_c, emb_h, emb_w)
578
-
579
- # init_latents = torch.cat([init_latents]*2) if do_classifier_free_guidance else init_latents # follow zero123
580
- init_latents = (
581
- torch.cat([torch.zeros_like(init_latents), init_latents]) if do_classifier_free_guidance else init_latents
582
- )
583
-
584
- init_latents = init_latents.to(device=device, dtype=dtype)
585
- return init_latents
586
-
587
- # def load_cc_projection(self, pretrained_weights=None):
588
- # self.cc_projection = torch.nn.Linear(772, 768)
589
- # torch.nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])
590
- # torch.nn.init.zeros_(list(self.cc_projection.parameters())[1])
591
- # if pretrained_weights is not None:
592
- # self.cc_projection.load_state_dict(pretrained_weights)
593
-
594
- @torch.no_grad()
595
- @replace_example_docstring(EXAMPLE_DOC_STRING)
596
- def __call__(
597
- self,
598
- input_imgs: Union[torch.Tensor, PIL.Image.Image] = None,
599
- prompt_imgs: Union[torch.Tensor, PIL.Image.Image] = None,
600
- poses: Union[List[float], List[List[float]]] = None,
601
- torch_dtype=torch.float32,
602
- height: Optional[int] = None,
603
- width: Optional[int] = None,
604
- num_inference_steps: int = 50,
605
- guidance_scale: float = 3.0,
606
- negative_prompt: Optional[Union[str, List[str]]] = None,
607
- num_images_per_prompt: Optional[int] = 1,
608
- eta: float = 0.0,
609
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
610
- latents: Optional[torch.Tensor] = None,
611
- prompt_embeds: Optional[torch.Tensor] = None,
612
- negative_prompt_embeds: Optional[torch.Tensor] = None,
613
- output_type: Optional[str] = "pil",
614
- return_dict: bool = True,
615
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
616
- callback_steps: int = 1,
617
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
618
- controlnet_conditioning_scale: float = 1.0,
619
- ):
620
- r"""
621
- Function invoked when calling the pipeline for generation.
622
-
623
- Args:
624
- input_imgs (`PIL` or `List[PIL]`, *optional*):
625
- The single input image for each 3D object
626
- prompt_imgs (`PIL` or `List[PIL]`, *optional*):
627
- Same as `input_imgs`, but used later as the image prompt condition, encoded by the CLIP image encoder
628
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
629
- The height in pixels of the generated image.
630
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
631
- The width in pixels of the generated image.
632
- num_inference_steps (`int`, *optional*, defaults to 50):
633
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
634
- expense of slower inference.
635
- guidance_scale (`float`, *optional*, defaults to 3.0):
636
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
637
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
638
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
639
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
640
- usually at the expense of lower image quality.
641
- negative_prompt (`str` or `List[str]`, *optional*):
642
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
643
- `negative_prompt_embeds` instead.
644
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
645
- num_images_per_prompt (`int`, *optional*, defaults to 1):
646
- The number of images to generate per prompt.
647
- eta (`float`, *optional*, defaults to 0.0):
648
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
649
- [`schedulers.DDIMScheduler`], will be ignored for others.
650
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
651
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
652
- to make generation deterministic.
653
- latents (`torch.Tensor`, *optional*):
654
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
655
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
656
- tensor will be generated by sampling using the supplied random `generator`.
657
- prompt_embeds (`torch.Tensor`, *optional*):
658
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
659
- provided, text embeddings will be generated from `prompt` input argument.
660
- negative_prompt_embeds (`torch.Tensor`, *optional*):
661
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
662
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
663
- argument.
664
- output_type (`str`, *optional*, defaults to `"pil"`):
665
- The output format of the generate image. Choose between
666
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
667
- return_dict (`bool`, *optional*, defaults to `True`):
668
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
669
- plain tuple.
670
- callback (`Callable`, *optional*):
671
- A function that will be called every `callback_steps` steps during inference. The function will be
672
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
673
- callback_steps (`int`, *optional*, defaults to 1):
674
- The frequency at which the `callback` function will be called. If not specified, the callback will be
675
- called at every step.
676
- cross_attention_kwargs (`dict`, *optional*):
677
- A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
678
- `self.processor` in
679
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
680
-
681
- Examples:
682
-
683
- Returns:
684
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
685
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
686
- When returning a tuple, the first element is a list with the generated images, and the second element is a
687
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
688
- (nsfw) content, according to the `safety_checker`.
689
- """
690
- # 0. Default height and width to unet
691
- height = height or self.unet.config.sample_size * self.vae_scale_factor
692
- width = width or self.unet.config.sample_size * self.vae_scale_factor
693
-
694
- # 1. Check inputs. Raise error if not correct
695
- # input_image = hint_imgs
696
- self.check_inputs(input_imgs, height, width, callback_steps)
697
-
698
- # 2. Define call parameters
699
- if isinstance(input_imgs, PIL.Image.Image):
700
- batch_size = 1
701
- elif isinstance(input_imgs, list):
702
- batch_size = len(input_imgs)
703
- else:
704
- batch_size = input_imgs.shape[0]
705
- device = self._execution_device
706
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
707
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
708
- # corresponds to doing no classifier free guidance.
709
- do_classifier_free_guidance = guidance_scale > 1.0
710
-
711
- # 3. Encode input image with pose as prompt
712
- prompt_embeds = self._encode_image_with_pose(
713
- prompt_imgs, poses, device, num_images_per_prompt, do_classifier_free_guidance
714
- )
715
-
716
- # 4. Prepare timesteps
717
- self.scheduler.set_timesteps(num_inference_steps, device=device)
718
- timesteps = self.scheduler.timesteps
719
-
720
- # 5. Prepare latent variables
721
- latents = self.prepare_latents(
722
- batch_size * num_images_per_prompt,
723
- 4,
724
- height,
725
- width,
726
- prompt_embeds.dtype,
727
- device,
728
- generator,
729
- latents,
730
- )
731
-
732
- # 6. Prepare image latents
733
- img_latents = self.prepare_img_latents(
734
- input_imgs,
735
- batch_size * num_images_per_prompt,
736
- prompt_embeds.dtype,
737
- device,
738
- generator,
739
- do_classifier_free_guidance,
740
- )
741
-
742
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
743
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
744
-
745
- # 8. Denoising loop
746
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
747
- with self.progress_bar(total=num_inference_steps) as progress_bar:
748
- for i, t in enumerate(timesteps):
749
- # expand the latents if we are doing classifier free guidance
750
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
751
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
752
- latent_model_input = torch.cat([latent_model_input, img_latents], dim=1)
753
-
754
- # predict the noise residual
755
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
756
-
757
- # perform guidance
758
- if do_classifier_free_guidance:
759
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
760
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
761
-
762
- # compute the previous noisy sample x_t -> x_t-1
763
- # latents = self.scheduler.step(noise_pred.to(dtype=torch.float32), t, latents.to(dtype=torch.float32)).prev_sample.to(prompt_embeds.dtype)
764
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
765
-
766
- # call the callback, if provided
767
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
768
- progress_bar.update()
769
- if callback is not None and i % callback_steps == 0:
770
- step_idx = i // getattr(self.scheduler, "order", 1)
771
- callback(step_idx, t, latents)
772
-
773
- # 9. Post-processing
774
- has_nsfw_concept = None
775
- if output_type == "latent":
776
- image = latents
777
- elif output_type == "pil":
778
- # 9. Post-processing
779
- image = self.decode_latents(latents)
780
- # 10. Convert to PIL
781
- image = self.numpy_to_pil(image)
782
- else:
783
- # 9. Post-processing
784
- image = self.decode_latents(latents)
785
-
786
- # Offload last model to CPU
787
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
788
- self.final_offload_hook.offload()
789
-
790
- if not return_dict:
791
- return (image, has_nsfw_concept)
792
-
793
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
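Note: the pipeline removed above is a Zero123-style novel view synthesis pipeline: a CLIP image embedding and a relative camera pose are concatenated, projected through `cc_projection`, and used as the UNet conditioning, while the VAE-encoded input view is concatenated to the latents. The minimal usage sketch below relies only on the `__call__` signature visible in the diff; the checkpoint id and the `custom_pipeline` loading path are assumptions, not something this commit specifies.

# Hedged usage sketch for the removed Zero123-style community pipeline.
# Only the __call__ arguments (input_imgs, prompt_imgs, poses, ...) come from the
# diff above; the checkpoint id and custom_pipeline name below are assumptions.
import torch
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "kxic/zero123-165000",                # assumed Zero123 checkpoint id
    custom_pipeline="pipeline_zero1to3",  # assumed name of the removed community file
    torch_dtype=torch.float16,
).to("cuda")

view = Image.open("input_view.png").convert("RGB")  # placeholder conditioning image
# A pose is three numbers (two angles in degrees and a radius term); see _encode_pose() above.
out = pipe(
    input_imgs=view,
    prompt_imgs=view,
    poses=[[30.0, 45.0, 0.0]],
    guidance_scale=3.0,
    num_inference_steps=50,
)
out.images[0].save("novel_view.png")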
regional_prompting_stable_diffusion.py DELETED
@@ -1,620 +0,0 @@
1
- import math
2
- from typing import Dict, Optional
3
-
4
- import torch
5
- import torchvision.transforms.functional as FF
6
- from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
7
-
8
- from diffusers import StableDiffusionPipeline
9
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
10
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
11
- from diffusers.schedulers import KarrasDiffusionSchedulers
12
- from diffusers.utils import USE_PEFT_BACKEND
13
-
14
-
15
- try:
16
- from compel import Compel
17
- except ImportError:
18
- Compel = None
19
-
20
- KCOMM = "ADDCOMM"
21
- KBRK = "BREAK"
22
-
23
-
24
- class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
25
- r"""
26
- Args for Regional Prompting Pipeline:
27
- rp_args:dict
28
- Required
29
- rp_args["mode"]: cols, rows, prompt, prompt-ex
30
- for cols, rows mode
31
- rp_args["div"]: ex) 1;1;1(Divide into 3 regions)
32
- for prompt, prompt-ex mode
33
- rp_args["th"]: ex) 0.5,0.5,0.6 (threshold for prompt mode)
34
-
35
- Optional
36
- rp_args["save_mask"]: True/False (save masks in prompt mode)
37
-
38
- Pipeline for text-to-image generation using Stable Diffusion.
39
-
40
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
41
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
42
-
43
- Args:
44
- vae ([`AutoencoderKL`]):
45
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
46
- text_encoder ([`CLIPTextModel`]):
47
- Frozen text-encoder. Stable Diffusion uses the text portion of
48
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
49
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
50
- tokenizer (`CLIPTokenizer`):
51
- Tokenizer of class
52
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
53
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
54
- scheduler ([`SchedulerMixin`]):
55
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
56
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
57
- safety_checker ([`StableDiffusionSafetyChecker`]):
58
- Classification module that estimates whether generated images could be considered offensive or harmful.
59
- Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
60
- feature_extractor ([`CLIPImageProcessor`]):
61
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
62
- """
63
-
64
- def __init__(
65
- self,
66
- vae: AutoencoderKL,
67
- text_encoder: CLIPTextModel,
68
- tokenizer: CLIPTokenizer,
69
- unet: UNet2DConditionModel,
70
- scheduler: KarrasDiffusionSchedulers,
71
- safety_checker: StableDiffusionSafetyChecker,
72
- feature_extractor: CLIPFeatureExtractor,
73
- requires_safety_checker: bool = True,
74
- ):
75
- super().__init__(
76
- vae,
77
- text_encoder,
78
- tokenizer,
79
- unet,
80
- scheduler,
81
- safety_checker,
82
- feature_extractor,
83
- requires_safety_checker,
84
- )
85
- self.register_modules(
86
- vae=vae,
87
- text_encoder=text_encoder,
88
- tokenizer=tokenizer,
89
- unet=unet,
90
- scheduler=scheduler,
91
- safety_checker=safety_checker,
92
- feature_extractor=feature_extractor,
93
- )
94
-
95
- @torch.no_grad()
96
- def __call__(
97
- self,
98
- prompt: str,
99
- height: int = 512,
100
- width: int = 512,
101
- num_inference_steps: int = 50,
102
- guidance_scale: float = 7.5,
103
- negative_prompt: str = None,
104
- num_images_per_prompt: Optional[int] = 1,
105
- eta: float = 0.0,
106
- generator: Optional[torch.Generator] = None,
107
- latents: Optional[torch.Tensor] = None,
108
- output_type: Optional[str] = "pil",
109
- return_dict: bool = True,
110
- rp_args: Dict[str, str] = None,
111
- ):
112
- active = KBRK in prompt[0] if isinstance(prompt, list) else KBRK in prompt
113
- if negative_prompt is None:
114
- negative_prompt = "" if isinstance(prompt, str) else [""] * len(prompt)
115
-
116
- device = self._execution_device
117
- regions = 0
118
-
119
- self.power = int(rp_args["power"]) if "power" in rp_args else 1
120
-
121
- prompts = prompt if isinstance(prompt, list) else [prompt]
122
- n_prompts = negative_prompt if isinstance(prompt, str) else [negative_prompt]
123
- self.batch = batch = num_images_per_prompt * len(prompts)
124
- all_prompts_cn, all_prompts_p = promptsmaker(prompts, num_images_per_prompt)
125
- all_n_prompts_cn, _ = promptsmaker(n_prompts, num_images_per_prompt)
126
-
127
- equal = len(all_prompts_cn) == len(all_n_prompts_cn)
128
-
129
- if Compel:
130
- compel = Compel(tokenizer=self.tokenizer, text_encoder=self.text_encoder)
131
-
132
- def getcompelembs(prps):
133
- embl = []
134
- for prp in prps:
135
- embl.append(compel.build_conditioning_tensor(prp))
136
- return torch.cat(embl)
137
-
138
- conds = getcompelembs(all_prompts_cn)
139
- unconds = getcompelembs(all_n_prompts_cn)
140
- embs = getcompelembs(prompts)
141
- n_embs = getcompelembs(n_prompts)
142
- prompt = negative_prompt = None
143
- else:
144
- conds = self.encode_prompt(prompts, device, 1, True)[0]
145
- unconds = (
146
- self.encode_prompt(n_prompts, device, 1, True)[0]
147
- if equal
148
- else self.encode_prompt(all_n_prompts_cn, device, 1, True)[0]
149
- )
150
- embs = n_embs = None
151
-
152
- if not active:
153
- pcallback = None
154
- mode = None
155
- else:
156
- if any(x in rp_args["mode"].upper() for x in ["COL", "ROW"]):
157
- mode = "COL" if "COL" in rp_args["mode"].upper() else "ROW"
158
- ocells, icells, regions = make_cells(rp_args["div"])
159
-
160
- elif "PRO" in rp_args["mode"].upper():
161
- regions = len(all_prompts_p[0])
162
- mode = "PROMPT"
163
- reset_attnmaps(self)
164
- self.ex = "EX" in rp_args["mode"].upper()
165
- self.target_tokens = target_tokens = tokendealer(self, all_prompts_p)
166
- thresholds = [float(x) for x in rp_args["th"].split(",")]
167
-
168
- orig_hw = (height, width)
169
- revers = True
170
-
171
- def pcallback(s_self, step: int, timestep: int, latents: torch.Tensor, selfs=None):
172
- if "PRO" in mode: # in Prompt mode, make masks from sum of attension maps
173
- self.step = step
174
-
175
- if len(self.attnmaps_sizes) > 3:
176
- self.history[step] = self.attnmaps.copy()
177
- for hw in self.attnmaps_sizes:
178
- allmasks = []
179
- basemasks = [None] * batch
180
- for tt, th in zip(target_tokens, thresholds):
181
- for b in range(batch):
182
- key = f"{tt}-{b}"
183
- _, mask, _ = makepmask(self, self.attnmaps[key], hw[0], hw[1], th, step)
184
- mask = mask.unsqueeze(0).unsqueeze(-1)
185
- if self.ex:
186
- allmasks[b::batch] = [x - mask for x in allmasks[b::batch]]
187
- allmasks[b::batch] = [torch.where(x > 0, 1, 0) for x in allmasks[b::batch]]
188
- allmasks.append(mask)
189
- basemasks[b] = mask if basemasks[b] is None else basemasks[b] + mask
190
- basemasks = [1 - mask for mask in basemasks]
191
- basemasks = [torch.where(x > 0, 1, 0) for x in basemasks]
192
- allmasks = basemasks + allmasks
193
-
194
- self.attnmasks[hw] = torch.cat(allmasks)
195
- self.maskready = True
196
- return latents
197
-
198
- def hook_forward(module):
199
- # diffusers==0.23.2
200
- def forward(
201
- hidden_states: torch.Tensor,
202
- encoder_hidden_states: Optional[torch.Tensor] = None,
203
- attention_mask: Optional[torch.Tensor] = None,
204
- temb: Optional[torch.Tensor] = None,
205
- scale: float = 1.0,
206
- ) -> torch.Tensor:
207
- attn = module
208
- xshape = hidden_states.shape
209
- self.hw = (h, w) = split_dims(xshape[1], *orig_hw)
210
-
211
- if revers:
212
- nx, px = hidden_states.chunk(2)
213
- else:
214
- px, nx = hidden_states.chunk(2)
215
-
216
- if equal:
217
- hidden_states = torch.cat(
218
- [px for i in range(regions)] + [nx for i in range(regions)],
219
- 0,
220
- )
221
- encoder_hidden_states = torch.cat([conds] + [unconds])
222
- else:
223
- hidden_states = torch.cat([px for i in range(regions)] + [nx], 0)
224
- encoder_hidden_states = torch.cat([conds] + [unconds])
225
-
226
- residual = hidden_states
227
-
228
- args = () if USE_PEFT_BACKEND else (scale,)
229
-
230
- if attn.spatial_norm is not None:
231
- hidden_states = attn.spatial_norm(hidden_states, temb)
232
-
233
- input_ndim = hidden_states.ndim
234
-
235
- if input_ndim == 4:
236
- batch_size, channel, height, width = hidden_states.shape
237
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
238
-
239
- batch_size, sequence_length, _ = (
240
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
241
- )
242
-
243
- if attention_mask is not None:
244
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
245
- attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
246
-
247
- if attn.group_norm is not None:
248
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
249
-
250
- args = () if USE_PEFT_BACKEND else (scale,)
251
- query = attn.to_q(hidden_states, *args)
252
-
253
- if encoder_hidden_states is None:
254
- encoder_hidden_states = hidden_states
255
- elif attn.norm_cross:
256
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
257
-
258
- key = attn.to_k(encoder_hidden_states, *args)
259
- value = attn.to_v(encoder_hidden_states, *args)
260
-
261
- inner_dim = key.shape[-1]
262
- head_dim = inner_dim // attn.heads
263
-
264
- query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
265
-
266
- key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
267
- value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
268
-
269
- # the output of sdp = (batch, num_heads, seq_len, head_dim)
270
- # TODO: add support for attn.scale when we move to Torch 2.1
271
- hidden_states = scaled_dot_product_attention(
272
- self,
273
- query,
274
- key,
275
- value,
276
- attn_mask=attention_mask,
277
- dropout_p=0.0,
278
- is_causal=False,
279
- getattn="PRO" in mode,
280
- )
281
-
282
- hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
283
- hidden_states = hidden_states.to(query.dtype)
284
-
285
- # linear proj
286
- hidden_states = attn.to_out[0](hidden_states, *args)
287
- # dropout
288
- hidden_states = attn.to_out[1](hidden_states)
289
-
290
- if input_ndim == 4:
291
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
292
-
293
- if attn.residual_connection:
294
- hidden_states = hidden_states + residual
295
-
296
- hidden_states = hidden_states / attn.rescale_output_factor
297
-
298
- #### Regional Prompting Col/Row mode
299
- if any(x in mode for x in ["COL", "ROW"]):
300
- reshaped = hidden_states.reshape(hidden_states.size()[0], h, w, hidden_states.size()[2])
301
- center = reshaped.shape[0] // 2
302
- px = reshaped[0:center] if equal else reshaped[0:-batch]
303
- nx = reshaped[center:] if equal else reshaped[-batch:]
304
- outs = [px, nx] if equal else [px]
305
- for out in outs:
306
- c = 0
307
- for i, ocell in enumerate(ocells):
308
- for icell in icells[i]:
309
- if "ROW" in mode:
310
- out[
311
- 0:batch,
312
- int(h * ocell[0]) : int(h * ocell[1]),
313
- int(w * icell[0]) : int(w * icell[1]),
314
- :,
315
- ] = out[
316
- c * batch : (c + 1) * batch,
317
- int(h * ocell[0]) : int(h * ocell[1]),
318
- int(w * icell[0]) : int(w * icell[1]),
319
- :,
320
- ]
321
- else:
322
- out[
323
- 0:batch,
324
- int(h * icell[0]) : int(h * icell[1]),
325
- int(w * ocell[0]) : int(w * ocell[1]),
326
- :,
327
- ] = out[
328
- c * batch : (c + 1) * batch,
329
- int(h * icell[0]) : int(h * icell[1]),
330
- int(w * ocell[0]) : int(w * ocell[1]),
331
- :,
332
- ]
333
- c += 1
334
- px, nx = (px[0:batch], nx[0:batch]) if equal else (px[0:batch], nx)
335
- hidden_states = torch.cat([nx, px], 0) if revers else torch.cat([px, nx], 0)
336
- hidden_states = hidden_states.reshape(xshape)
337
-
338
- #### Regional Prompting Prompt mode
339
- elif "PRO" in mode:
340
- px, nx = (
341
- torch.chunk(hidden_states) if equal else hidden_states[0:-batch],
342
- hidden_states[-batch:],
343
- )
344
-
345
- if (h, w) in self.attnmasks and self.maskready:
346
-
347
- def mask(input):
348
- out = torch.multiply(input, self.attnmasks[(h, w)])
349
- for b in range(batch):
350
- for r in range(1, regions):
351
- out[b] = out[b] + out[r * batch + b]
352
- return out
353
-
354
- px, nx = (mask(px), mask(nx)) if equal else (mask(px), nx)
355
- px, nx = (px[0:batch], nx[0:batch]) if equal else (px[0:batch], nx)
356
- hidden_states = torch.cat([nx, px], 0) if revers else torch.cat([px, nx], 0)
357
- return hidden_states
358
-
359
- return forward
360
-
361
- def hook_forwards(root_module: torch.nn.Module):
362
- for name, module in root_module.named_modules():
363
- if "attn2" in name and module.__class__.__name__ == "Attention":
364
- module.forward = hook_forward(module)
365
-
366
- hook_forwards(self.unet)
367
-
368
- output = StableDiffusionPipeline(**self.components)(
369
- prompt=prompt,
370
- prompt_embeds=embs,
371
- negative_prompt=negative_prompt,
372
- negative_prompt_embeds=n_embs,
373
- height=height,
374
- width=width,
375
- num_inference_steps=num_inference_steps,
376
- guidance_scale=guidance_scale,
377
- num_images_per_prompt=num_images_per_prompt,
378
- eta=eta,
379
- generator=generator,
380
- latents=latents,
381
- output_type=output_type,
382
- return_dict=return_dict,
383
- callback_on_step_end=pcallback,
384
- )
385
-
386
- if "save_mask" in rp_args:
387
- save_mask = rp_args["save_mask"]
388
- else:
389
- save_mask = False
390
-
391
- if mode == "PROMPT" and save_mask:
392
- saveattnmaps(
393
- self,
394
- output,
395
- height,
396
- width,
397
- thresholds,
398
- num_inference_steps // 2,
399
- regions,
400
- )
401
-
402
- return output
403
-
404
-
405
- ### Make prompt list for each region
406
- def promptsmaker(prompts, batch):
407
- out_p = []
408
- plen = len(prompts)
409
- for prompt in prompts:
410
- add = ""
411
- if KCOMM in prompt:
412
- add, prompt = prompt.split(KCOMM)
413
- add = add + " "
414
- prompts = prompt.split(KBRK)
415
- out_p.append([add + p for p in prompts])
416
- out = [None] * batch * len(out_p[0]) * len(out_p)
417
- for p, prs in enumerate(out_p): # inputs prompts
418
- for r, pr in enumerate(prs): # prompts for regions
419
- start = (p + r * plen) * batch
420
- out[start : start + batch] = [pr] * batch # P1R1B1,P1R1B2...,P1R2B1,P1R2B2...,P2R1B1...
421
- return out, out_p
422
-
423
-
424
- ### make regions from ratios
425
- ### ";" makes outercells, "," makes inner cells
426
- def make_cells(ratios):
427
- if ";" not in ratios and "," in ratios:
428
- ratios = ratios.replace(",", ";")
429
- ratios = ratios.split(";")
430
- ratios = [inratios.split(",") for inratios in ratios]
431
-
432
- icells = []
433
- ocells = []
434
-
435
- def startend(cells, array):
436
- current_start = 0
437
- array = [float(x) for x in array]
438
- for value in array:
439
- end = current_start + (value / sum(array))
440
- cells.append([current_start, end])
441
- current_start = end
442
-
443
- startend(ocells, [r[0] for r in ratios])
444
-
445
- for inratios in ratios:
446
- if 2 > len(inratios):
447
- icells.append([[0, 1]])
448
- else:
449
- add = []
450
- startend(add, inratios[1:])
451
- icells.append(add)
452
-
453
- return ocells, icells, sum(len(cell) for cell in icells)
454
-
455
-
456
- def make_emblist(self, prompts):
457
- with torch.no_grad():
458
- tokens = self.tokenizer(
459
- prompts,
460
- max_length=self.tokenizer.model_max_length,
461
- padding=True,
462
- truncation=True,
463
- return_tensors="pt",
464
- ).input_ids.to(self.device)
465
- embs = self.text_encoder(tokens, output_hidden_states=True).last_hidden_state.to(self.device, dtype=self.dtype)
466
- return embs
467
-
468
-
469
- def split_dims(xs, height, width):
470
- xs = xs
471
-
472
- def repeat_div(x, y):
473
- while y > 0:
474
- x = math.ceil(x / 2)
475
- y = y - 1
476
- return x
477
-
478
- scale = math.ceil(math.log2(math.sqrt(height * width / xs)))
479
- dsh = repeat_div(height, scale)
480
- dsw = repeat_div(width, scale)
481
- return dsh, dsw
482
-
483
-
484
- ##### for prompt mode
485
- def get_attn_maps(self, attn):
486
- height, width = self.hw
487
- target_tokens = self.target_tokens
488
- if (height, width) not in self.attnmaps_sizes:
489
- self.attnmaps_sizes.append((height, width))
490
-
491
- for b in range(self.batch):
492
- for t in target_tokens:
493
- power = self.power
494
- add = attn[b, :, :, t[0] : t[0] + len(t)] ** (power) * (self.attnmaps_sizes.index((height, width)) + 1)
495
- add = torch.sum(add, dim=2)
496
- key = f"{t}-{b}"
497
- if key not in self.attnmaps:
498
- self.attnmaps[key] = add
499
- else:
500
- if self.attnmaps[key].shape[1] != add.shape[1]:
501
- add = add.view(8, height, width)
502
- add = FF.resize(add, self.attnmaps_sizes[0], antialias=None)
503
- add = add.reshape_as(self.attnmaps[key])
504
-
505
- self.attnmaps[key] = self.attnmaps[key] + add
506
-
507
-
508
- def reset_attnmaps(self): # init parameters in every batch
509
- self.step = 0
510
- self.attnmaps = {} # made from attention maps
511
- self.attnmaps_sizes = [] # height,width set of u-net blocks
512
- self.attnmasks = {} # made from attnmaps for regions
513
- self.maskready = False
514
- self.history = {}
515
-
516
-
517
- def saveattnmaps(self, output, h, w, th, step, regions):
518
- masks = []
519
- for i, mask in enumerate(self.history[step].values()):
520
- img, _, mask = makepmask(self, mask, h, w, th[i % len(th)], step)
521
- if self.ex:
522
- masks = [x - mask for x in masks]
523
- masks.append(mask)
524
- if len(masks) == regions - 1:
525
- output.images.extend([FF.to_pil_image(mask) for mask in masks])
526
- masks = []
527
- else:
528
- output.images.append(img)
529
-
530
-
531
- def makepmask(
532
- self, mask, h, w, th, step
533
- ): # make masks from attention cache; returns [for preview, for attention, for latent]
534
- th = th - step * 0.005
535
- if 0.05 >= th:
536
- th = 0.05
537
- mask = torch.mean(mask, dim=0)
538
- mask = mask / mask.max().item()
539
- mask = torch.where(mask > th, 1, 0)
540
- mask = mask.float()
541
- mask = mask.view(1, *self.attnmaps_sizes[0])
542
- img = FF.to_pil_image(mask)
543
- img = img.resize((w, h))
544
- mask = FF.resize(mask, (h, w), interpolation=FF.InterpolationMode.NEAREST, antialias=None)
545
- lmask = mask
546
- mask = mask.reshape(h * w)
547
- mask = torch.where(mask > 0.1, 1, 0)
548
- return img, mask, lmask
549
-
550
-
551
- def tokendealer(self, all_prompts):
552
- for prompts in all_prompts:
553
- targets = [p.split(",")[-1] for p in prompts[1:]]
554
- tt = []
555
-
556
- for target in targets:
557
- ptokens = (
558
- self.tokenizer(
559
- prompts,
560
- max_length=self.tokenizer.model_max_length,
561
- padding=True,
562
- truncation=True,
563
- return_tensors="pt",
564
- ).input_ids
565
- )[0]
566
- ttokens = (
567
- self.tokenizer(
568
- target,
569
- max_length=self.tokenizer.model_max_length,
570
- padding=True,
571
- truncation=True,
572
- return_tensors="pt",
573
- ).input_ids
574
- )[0]
575
-
576
- tlist = []
577
-
578
- for t in range(ttokens.shape[0] - 2):
579
- for p in range(ptokens.shape[0]):
580
- if ttokens[t + 1] == ptokens[p]:
581
- tlist.append(p)
582
- if tlist != []:
583
- tt.append(tlist)
584
-
585
- return tt
586
-
587
-
588
- def scaled_dot_product_attention(
589
- self,
590
- query,
591
- key,
592
- value,
593
- attn_mask=None,
594
- dropout_p=0.0,
595
- is_causal=False,
596
- scale=None,
597
- getattn=False,
598
- ) -> torch.Tensor:
599
- # Efficient implementation equivalent to the following:
600
- L, S = query.size(-2), key.size(-2)
601
- scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
602
- attn_bias = torch.zeros(L, S, dtype=query.dtype, device=self.device)
603
- if is_causal:
604
- assert attn_mask is None
605
- temp_mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
606
- attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
607
- attn_bias.to(query.dtype)
608
-
609
- if attn_mask is not None:
610
- if attn_mask.dtype == torch.bool:
611
- attn_mask.masked_fill_(attn_mask.logical_not(), float("-inf"))
612
- else:
613
- attn_bias += attn_mask
614
- attn_weight = query @ key.transpose(-2, -1) * scale_factor
615
- attn_weight += attn_bias
616
- attn_weight = torch.softmax(attn_weight, dim=-1)
617
- if getattn:
618
- get_attn_maps(self, attn_weight)
619
- attn_weight = torch.dropout(attn_weight, dropout_p, train=True)
620
- return attn_weight @ value
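Note: the file removed above, `regional_prompting_stable_diffusion.py`, assigned one sub-prompt (separated by the `BREAK` keyword, with an optional common prefix before `ADDCOMM`) to each region of the canvas, either by a fixed row/column split (`rp_args["div"]`) or by attention-derived masks in prompt mode (`rp_args["th"]`). The sketch below is a hedged reconstruction of typical usage based only on the docstring and `__call__` signature in the diff; the base checkpoint id and the `custom_pipeline` loading path are assumptions.

# Hedged usage sketch for the removed RegionalPromptingStableDiffusionPipeline.
# rp_args follows the class docstring: mode = "rows"/"cols" with "div",
# or "prompt"/"prompt-ex" with "th". The checkpoint id below is an assumption.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",                       # assumed base SD 1.5 checkpoint
    custom_pipeline="regional_prompting_stable_diffusion",  # assumed name of the removed community file
    torch_dtype=torch.float16,
).to("cuda")

# Common prefix before ADDCOMM, then one sub-prompt per region separated by BREAK.
prompt = "a sunny park, best quality ADDCOMM a red umbrella BREAK a wooden bench BREAK a stone fountain"
image = pipe(
    prompt=prompt,
    negative_prompt="low quality, blurry",
    height=512,
    width=512,
    num_inference_steps=30,
    guidance_scale=7.5,
    rp_args={"mode": "rows", "div": "1;1;1"},  # three equal horizontal bands, one per sub-prompt
).images[0]
image.save("regional_prompting.png")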
rerender_a_video.py DELETED
@@ -1,1194 +0,0 @@
1
- # Copyright 2024 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from dataclasses import dataclass
16
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
-
18
- import numpy as np
19
- import PIL.Image
20
- import torch
21
- import torch.nn.functional as F
22
- import torchvision.transforms as T
23
- from gmflow.gmflow import GMFlow
24
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
25
-
26
- from diffusers.image_processor import VaeImageProcessor
27
- from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
28
- from diffusers.models.attention_processor import Attention, AttnProcessor
29
- from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
30
- from diffusers.pipelines.controlnet.pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
31
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
32
- from diffusers.schedulers import KarrasDiffusionSchedulers
33
- from diffusers.utils import BaseOutput, deprecate, logging
34
- from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
35
-
36
-
37
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
38
-
39
-
40
- def coords_grid(b, h, w, homogeneous=False, device=None):
41
- y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W]
42
-
43
- stacks = [x, y]
44
-
45
- if homogeneous:
46
- ones = torch.ones_like(x) # [H, W]
47
- stacks.append(ones)
48
-
49
- grid = torch.stack(stacks, dim=0).float() # [2, H, W] or [3, H, W]
50
-
51
- grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W]
52
-
53
- if device is not None:
54
- grid = grid.to(device)
55
-
56
- return grid
57
-
58
-
59
- def bilinear_sample(img, sample_coords, mode="bilinear", padding_mode="zeros", return_mask=False):
60
- # img: [B, C, H, W]
61
- # sample_coords: [B, 2, H, W] in image scale
62
- if sample_coords.size(1) != 2: # [B, H, W, 2]
63
- sample_coords = sample_coords.permute(0, 3, 1, 2)
64
-
65
- b, _, h, w = sample_coords.shape
66
-
67
- # Normalize to [-1, 1]
68
- x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1
69
- y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1
70
-
71
- grid = torch.stack([x_grid, y_grid], dim=-1) # [B, H, W, 2]
72
-
73
- img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True)
74
-
75
- if return_mask:
76
- mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1) # [B, H, W]
77
-
78
- return img, mask
79
-
80
- return img
81
-
82
-
83
- def flow_warp(feature, flow, mask=False, mode="bilinear", padding_mode="zeros"):
84
- b, c, h, w = feature.size()
85
- assert flow.size(1) == 2
86
-
87
- grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W]
88
- grid = grid.to(feature.dtype)
89
- return bilinear_sample(feature, grid, mode=mode, padding_mode=padding_mode, return_mask=mask)
90
-
91
-
92
- def forward_backward_consistency_check(fwd_flow, bwd_flow, alpha=0.01, beta=0.5):
93
- # fwd_flow, bwd_flow: [B, 2, H, W]
94
- # alpha and beta values are following UnFlow
95
- # (https://arxiv.org/abs/1711.07837)
96
- assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4
97
- assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2
98
- flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1) # [B, H, W]
99
-
100
- warped_bwd_flow = flow_warp(bwd_flow, fwd_flow) # [B, 2, H, W]
101
- warped_fwd_flow = flow_warp(fwd_flow, bwd_flow) # [B, 2, H, W]
102
-
103
- diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1) # [B, H, W]
104
- diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1)
105
-
106
- threshold = alpha * flow_mag + beta
107
-
108
- fwd_occ = (diff_fwd > threshold).float() # [B, H, W]
109
- bwd_occ = (diff_bwd > threshold).float()
110
-
111
- return fwd_occ, bwd_occ
112
-
113
-
114
- @torch.no_grad()
115
- def get_warped_and_mask(flow_model, image1, image2, image3=None, pixel_consistency=False, device=None):
116
- if image3 is None:
117
- image3 = image1
118
- padder = InputPadder(image1.shape, padding_factor=8)
119
- image1, image2 = padder.pad(image1[None].to(device), image2[None].to(device))
120
- results_dict = flow_model(
121
- image1, image2, attn_splits_list=[2], corr_radius_list=[-1], prop_radius_list=[-1], pred_bidir_flow=True
122
- )
123
- flow_pr = results_dict["flow_preds"][-1] # [B, 2, H, W]
124
- fwd_flow = padder.unpad(flow_pr[0]).unsqueeze(0) # [1, 2, H, W]
125
- bwd_flow = padder.unpad(flow_pr[1]).unsqueeze(0) # [1, 2, H, W]
126
- fwd_occ, bwd_occ = forward_backward_consistency_check(fwd_flow, bwd_flow) # [1, H, W] float
127
- if pixel_consistency:
128
- warped_image1 = flow_warp(image1, bwd_flow)
129
- bwd_occ = torch.clamp(
130
- bwd_occ + (abs(image2 - warped_image1).mean(dim=1) > 255 * 0.25).float(), 0, 1
131
- ).unsqueeze(0)
132
- warped_results = flow_warp(image3, bwd_flow)
133
- return warped_results, bwd_occ, bwd_flow
134
-
135
-
136
- blur = T.GaussianBlur(kernel_size=(9, 9), sigma=(18, 18))
137
-
138
-
139
- @dataclass
140
- class TextToVideoSDPipelineOutput(BaseOutput):
141
- """
142
- Output class for text-to-video pipelines.
143
-
144
- Args:
145
- frames (`List[np.ndarray]` or `torch.Tensor`):
146
- List of denoised frames (essentially images) as NumPy arrays of shape `(height, width, num_channels)` or as
147
- a `torch` tensor. The length of the list denotes the video length (the number of frames).
148
- """
149
-
150
- frames: Union[List[np.ndarray], torch.Tensor]
151
-
152
-
153
- @torch.no_grad()
154
- def find_flat_region(mask):
155
- device = mask.device
156
- kernel_x = torch.Tensor([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]).unsqueeze(0).unsqueeze(0).to(device)
157
- kernel_y = torch.Tensor([[-1, -1, -1], [0, 0, 0], [1, 1, 1]]).unsqueeze(0).unsqueeze(0).to(device)
158
- mask_ = F.pad(mask.unsqueeze(0), (1, 1, 1, 1), mode="replicate")
159
-
160
- grad_x = torch.nn.functional.conv2d(mask_, kernel_x)
161
- grad_y = torch.nn.functional.conv2d(mask_, kernel_y)
162
- return ((abs(grad_x) + abs(grad_y)) == 0).float()[0]
163
-
164
-
165
- class AttnState:
166
- STORE = 0
167
- LOAD = 1
168
- LOAD_AND_STORE_PREV = 2
169
-
170
- def __init__(self):
171
- self.reset()
172
-
173
- @property
174
- def state(self):
175
- return self.__state
176
-
177
- @property
178
- def timestep(self):
179
- return self.__timestep
180
-
181
- def set_timestep(self, t):
182
- self.__timestep = t
183
-
184
- def reset(self):
185
- self.__state = AttnState.STORE
186
- self.__timestep = 0
187
-
188
- def to_load(self):
189
- self.__state = AttnState.LOAD
190
-
191
- def to_load_and_store_prev(self):
192
- self.__state = AttnState.LOAD_AND_STORE_PREV
193
-
194
-
195
- class CrossFrameAttnProcessor(AttnProcessor):
196
- """
197
- Cross-frame attention processor. Each frame attends to the first frame and the previous frame.
198
-
199
- Args:
200
- attn_state: Whether the model is processing the first frame or an intermediate frame
201
- """
202
-
203
- def __init__(self, attn_state: AttnState):
204
- super().__init__()
205
- self.attn_state = attn_state
206
- self.first_maps = {}
207
- self.prev_maps = {}
208
-
209
- def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None):
210
- # Is self attention
211
- if encoder_hidden_states is None:
212
- t = self.attn_state.timestep
213
- if self.attn_state.state == AttnState.STORE:
214
- self.first_maps[t] = hidden_states.detach()
215
- self.prev_maps[t] = hidden_states.detach()
216
- res = super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask, temb)
217
- else:
218
- if self.attn_state.state == AttnState.LOAD_AND_STORE_PREV:
219
- tmp = hidden_states.detach()
220
- cross_map = torch.cat((self.first_maps[t], self.prev_maps[t]), dim=1)
221
- res = super().__call__(attn, hidden_states, cross_map, attention_mask, temb)
222
- if self.attn_state.state == AttnState.LOAD_AND_STORE_PREV:
223
- self.prev_maps[t] = tmp
224
- else:
225
- res = super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask, temb)
226
-
227
- return res
228
-
229
-
230
- def prepare_image(image):
231
- if isinstance(image, torch.Tensor):
232
- # Batch single image
233
- if image.ndim == 3:
234
- image = image.unsqueeze(0)
235
-
236
- image = image.to(dtype=torch.float32)
237
- else:
238
- # preprocess image
239
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
240
- image = [image]
241
-
242
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
243
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
244
- image = np.concatenate(image, axis=0)
245
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
246
- image = np.concatenate([i[None, :] for i in image], axis=0)
247
-
248
- image = image.transpose(0, 3, 1, 2)
249
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
250
-
251
- return image
252
-
253
-
254
- class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
255
- r"""
256
- Pipeline for video-to-video translation using Stable Diffusion with the Rerender algorithm.
257
-
258
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
259
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
260
-
261
- In addition the pipeline inherits the following loading methods:
262
- - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
263
-
264
- Args:
265
- vae ([`AutoencoderKL`]):
266
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
267
- text_encoder ([`CLIPTextModel`]):
268
- Frozen text-encoder. Stable Diffusion uses the text portion of
269
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
270
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
271
- tokenizer (`CLIPTokenizer`):
272
- Tokenizer of class
273
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
274
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
275
- controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
276
- Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
277
- as a list, the outputs from each ControlNet are added together to create one combined additional
278
- conditioning.
279
- scheduler ([`SchedulerMixin`]):
280
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
281
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
282
- safety_checker ([`StableDiffusionSafetyChecker`]):
283
- Classification module that estimates whether generated images could be considered offensive or harmful.
284
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
285
- feature_extractor ([`CLIPImageProcessor`]):
286
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
287
- """
288
-
289
- _optional_components = ["safety_checker", "feature_extractor"]
290
-
291
- def __init__(
292
- self,
293
- vae: AutoencoderKL,
294
- text_encoder: CLIPTextModel,
295
- tokenizer: CLIPTokenizer,
296
- unet: UNet2DConditionModel,
297
- controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
298
- scheduler: KarrasDiffusionSchedulers,
299
- safety_checker: StableDiffusionSafetyChecker,
300
- feature_extractor: CLIPImageProcessor,
301
- image_encoder=None,
302
- requires_safety_checker: bool = True,
303
- device=None,
304
- ):
305
- super().__init__(
306
- vae,
307
- text_encoder,
308
- tokenizer,
309
- unet,
310
- controlnet,
311
- scheduler,
312
- safety_checker,
313
- feature_extractor,
314
- image_encoder,
315
- requires_safety_checker,
316
- )
317
- self.to(device)
318
-
319
- if safety_checker is None and requires_safety_checker:
320
- logger.warning(
321
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
322
- " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
323
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
324
- " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
325
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
326
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
327
- )
328
-
329
- if safety_checker is not None and feature_extractor is None:
330
- raise ValueError(
331
- f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
332
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
333
- )
334
-
335
- if isinstance(controlnet, (list, tuple)):
336
- controlnet = MultiControlNetModel(controlnet)
337
-
338
- self.register_modules(
339
- vae=vae,
340
- text_encoder=text_encoder,
341
- tokenizer=tokenizer,
342
- unet=unet,
343
- controlnet=controlnet,
344
- scheduler=scheduler,
345
- safety_checker=safety_checker,
346
- feature_extractor=feature_extractor,
347
- )
348
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
349
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
350
- self.control_image_processor = VaeImageProcessor(
351
- vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
352
- )
353
- self.register_to_config(requires_safety_checker=requires_safety_checker)
354
- self.attn_state = AttnState()
355
- attn_processor_dict = {}
356
- for k in unet.attn_processors.keys():
357
- if k.startswith("up"):
358
- attn_processor_dict[k] = CrossFrameAttnProcessor(self.attn_state)
359
- else:
360
- attn_processor_dict[k] = AttnProcessor()
361
-
362
- self.unet.set_attn_processor(attn_processor_dict)
363
-
364
- flow_model = GMFlow(
365
- feature_channels=128,
366
- num_scales=1,
367
- upsample_factor=8,
368
- num_head=1,
369
- attention_type="swin",
370
- ffn_dim_expansion=4,
371
- num_transformer_layers=6,
372
- ).to(self.device)
373
-
374
- checkpoint = torch.utils.model_zoo.load_url(
375
- "https://huggingface.co/Anonymous-sub/Rerender/resolve/main/models/gmflow_sintel-0c07dcb3.pth",
376
- map_location=lambda storage, loc: storage,
377
- )
378
- weights = checkpoint["model"] if "model" in checkpoint else checkpoint
379
- flow_model.load_state_dict(weights, strict=False)
380
- flow_model.eval()
381
- self.flow_model = flow_model
382
-
383
- # Modified from src/diffusers/pipelines/controlnet/pipeline_controlnet.StableDiffusionControlNetImg2ImgPipeline.check_inputs
384
- def check_inputs(
385
- self,
386
- prompt,
387
- callback_steps,
388
- negative_prompt=None,
389
- prompt_embeds=None,
390
- negative_prompt_embeds=None,
391
- controlnet_conditioning_scale=1.0,
392
- control_guidance_start=0.0,
393
- control_guidance_end=1.0,
394
- ):
395
- if (callback_steps is None) or (
396
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
397
- ):
398
- raise ValueError(
399
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
400
- f" {type(callback_steps)}."
401
- )
402
-
403
- if prompt is not None and prompt_embeds is not None:
404
- raise ValueError(
405
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
406
- " only forward one of the two."
407
- )
408
- elif prompt is None and prompt_embeds is None:
409
- raise ValueError(
410
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
411
- )
412
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
413
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
414
-
415
- if negative_prompt is not None and negative_prompt_embeds is not None:
416
- raise ValueError(
417
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
418
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
419
- )
420
-
421
- if prompt_embeds is not None and negative_prompt_embeds is not None:
422
- if prompt_embeds.shape != negative_prompt_embeds.shape:
423
- raise ValueError(
424
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
425
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
426
- f" {negative_prompt_embeds.shape}."
427
- )
428
-
429
- # `prompt` needs more sophisticated handling when there are multiple
430
- # conditionings.
431
- if isinstance(self.controlnet, MultiControlNetModel):
432
- if isinstance(prompt, list):
433
- logger.warning(
434
- f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
435
- " prompts. The conditionings will be fixed across the prompts."
436
- )
437
-
438
- is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
439
- self.controlnet, torch._dynamo.eval_frame.OptimizedModule
440
- )
441
-
442
- # Check `controlnet_conditioning_scale`
443
- if (
444
- isinstance(self.controlnet, ControlNetModel)
445
- or is_compiled
446
- and isinstance(self.controlnet._orig_mod, ControlNetModel)
447
- ):
448
- if not isinstance(controlnet_conditioning_scale, float):
449
- raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
450
- elif (
451
- isinstance(self.controlnet, MultiControlNetModel)
452
- or is_compiled
453
- and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
454
- ):
455
- if isinstance(controlnet_conditioning_scale, list):
456
- if any(isinstance(i, list) for i in controlnet_conditioning_scale):
457
- raise ValueError("A single batch of multiple conditionings is supported at the moment.")
458
- elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
459
- self.controlnet.nets
460
- ):
461
- raise ValueError(
462
- "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
463
- " the same length as the number of controlnets"
464
- )
465
- else:
466
- assert False
467
-
468
- if len(control_guidance_start) != len(control_guidance_end):
469
- raise ValueError(
470
- f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
471
- )
472
-
473
- if isinstance(self.controlnet, MultiControlNetModel):
474
- if len(control_guidance_start) != len(self.controlnet.nets):
475
- raise ValueError(
476
- f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
477
- )
478
-
479
- for start, end in zip(control_guidance_start, control_guidance_end):
480
- if start >= end:
481
- raise ValueError(
482
- f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
483
- )
484
- if start < 0.0:
485
- raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
486
- if end > 1.0:
487
- raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
488
-
489
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
490
- def prepare_control_image(
491
- self,
492
- image,
493
- width,
494
- height,
495
- batch_size,
496
- num_images_per_prompt,
497
- device,
498
- dtype,
499
- do_classifier_free_guidance=False,
500
- guess_mode=False,
501
- ):
502
- image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
503
- image_batch_size = image.shape[0]
504
-
505
- if image_batch_size == 1:
506
- repeat_by = batch_size
507
- else:
508
- # image batch size is the same as prompt batch size
509
- repeat_by = num_images_per_prompt
510
-
511
- image = image.repeat_interleave(repeat_by, dim=0)
512
-
513
- image = image.to(device=device, dtype=dtype)
514
-
515
- if do_classifier_free_guidance and not guess_mode:
516
- image = torch.cat([image] * 2)
517
-
518
- return image
519
-
520
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
521
- def get_timesteps(self, num_inference_steps, strength, device):
522
- # get the original timestep using init_timestep
523
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
524
-
525
- t_start = max(num_inference_steps - init_timestep, 0)
526
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
527
-
528
- return timesteps, num_inference_steps - t_start
529
-
530
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents
531
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
532
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
533
- raise ValueError(
534
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
535
- )
536
-
537
- image = image.to(device=device, dtype=dtype)
538
-
539
- batch_size = batch_size * num_images_per_prompt
540
-
541
- if image.shape[1] == 4:
542
- init_latents = image
543
-
544
- else:
545
- if isinstance(generator, list) and len(generator) != batch_size:
546
- raise ValueError(
547
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
548
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
549
- )
550
-
551
- elif isinstance(generator, list):
552
- init_latents = [
553
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
554
- ]
555
- init_latents = torch.cat(init_latents, dim=0)
556
- else:
557
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
558
-
559
- init_latents = self.vae.config.scaling_factor * init_latents
560
-
561
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
562
- # expand init_latents for batch_size
563
- deprecation_message = (
564
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
565
- " images (`image`). Initial images are now duplicated to match the number of text prompts. Note"
566
- " that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update"
567
- " your script to pass as many initial images as text prompts to suppress this warning."
568
- )
569
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
570
- additional_image_per_prompt = batch_size // init_latents.shape[0]
571
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
572
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
573
- raise ValueError(
574
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
575
- )
576
- else:
577
- init_latents = torch.cat([init_latents], dim=0)
578
-
579
- shape = init_latents.shape
580
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
581
-
582
- # get latents
583
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
584
- latents = init_latents
585
-
586
- return latents
587
-
588
- @torch.no_grad()
589
- def __call__(
590
- self,
591
- prompt: Union[str, List[str]] = None,
592
- frames: Union[List[np.ndarray], torch.Tensor] = None,
593
- control_frames: Union[List[np.ndarray], torch.Tensor] = None,
594
- strength: float = 0.8,
595
- num_inference_steps: int = 50,
596
- guidance_scale: float = 7.5,
597
- negative_prompt: Optional[Union[str, List[str]]] = None,
598
- eta: float = 0.0,
599
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
600
- latents: Optional[torch.Tensor] = None,
601
- prompt_embeds: Optional[torch.Tensor] = None,
602
- negative_prompt_embeds: Optional[torch.Tensor] = None,
603
- output_type: Optional[str] = "pil",
604
- return_dict: bool = True,
605
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
606
- callback_steps: int = 1,
607
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
608
- controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
609
- guess_mode: bool = False,
610
- control_guidance_start: Union[float, List[float]] = 0.0,
611
- control_guidance_end: Union[float, List[float]] = 1.0,
612
- warp_start: Union[float, List[float]] = 0.0,
613
- warp_end: Union[float, List[float]] = 0.3,
614
- mask_start: Union[float, List[float]] = 0.5,
615
- mask_end: Union[float, List[float]] = 0.8,
616
- smooth_boundary: bool = True,
617
- mask_strength: Union[float, List[float]] = 0.5,
618
- inner_strength: Union[float, List[float]] = 0.9,
619
- ):
620
- r"""
621
- Function invoked when calling the pipeline for generation.
622
-
623
- Args:
624
- prompt (`str` or `List[str]`, *optional*):
625
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
626
- instead.
627
- frames (`List[np.ndarray]` or `torch.Tensor`): The input images to be used as the starting point for the image generation process.
628
- control_frames (`List[np.ndarray]` or `torch.Tensor`): The ControlNet input images condition to provide guidance to the `unet` for generation.
629
- strength (`float`): SDEdit strength.
630
- num_inference_steps (`int`, *optional*, defaults to 50):
631
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
632
- expense of slower inference.
633
- guidance_scale (`float`, *optional*, defaults to 7.5):
634
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
635
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
636
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
637
- 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
638
- usually at the expense of lower image quality.
639
- negative_prompt (`str` or `List[str]`, *optional*):
640
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
641
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
642
- less than `1`).
643
- eta (`float`, *optional*, defaults to 0.0):
644
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
645
- [`schedulers.DDIMScheduler`], will be ignored for others.
646
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
647
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
648
- to make generation deterministic.
649
- latents (`torch.Tensor`, *optional*):
650
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
651
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
652
- tensor will be generated by sampling using the supplied random `generator`.
653
- prompt_embeds (`torch.Tensor`, *optional*):
654
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
655
- provided, text embeddings will be generated from `prompt` input argument.
656
- negative_prompt_embeds (`torch.Tensor`, *optional*):
657
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
658
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
659
- argument.
660
- output_type (`str`, *optional*, defaults to `"pil"`):
661
- The output format of the generated image. Choose between
662
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
663
- return_dict (`bool`, *optional*, defaults to `True`):
664
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
665
- plain tuple.
666
- callback (`Callable`, *optional*):
667
- A function that will be called every `callback_steps` steps during inference. The function will be
668
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
669
- callback_steps (`int`, *optional*, defaults to 1):
670
- The frequency at which the `callback` function will be called. If not specified, the callback will be
671
- called at every step.
672
- cross_attention_kwargs (`dict`, *optional*):
673
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
674
- `self.processor` in
675
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
676
- controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
677
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
678
- to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
679
- corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting
680
- than for [`~StableDiffusionControlNetPipeline.__call__`].
681
- guess_mode (`bool`, *optional*, defaults to `False`):
682
- In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
683
- you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
684
- control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
685
- The percentage of total steps at which the controlnet starts applying.
686
- control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
687
- The percentage of total steps at which the controlnet stops applying.
688
- warp_start (`float`): Shape-aware fusion start timestep.
689
- warp_end (`float`): Shape-aware fusion end timestep.
690
- mask_start (`float`): Pixel-aware fusion start timestep.
691
- mask_end (`float`): Pixel-aware fusion end timestep.
692
- smooth_boundary (`bool`): Smooth fusion boundary. Set `True` to prevent artifacts at boundary.
693
- mask_strength (`float`): Pixel-aware fusion strength.
694
- inner_strength (`float`): Pixel-aware fusion detail level.
695
-
696
- Examples:
697
-
698
- Returns:
699
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
700
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, otherwise a `tuple`.
701
- When returning a tuple, the first element is a list with the generated images, and the second element is a
702
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
703
- (nsfw) content, according to the `safety_checker`.
704
- """
705
- controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
706
-
707
- # align format for control guidance
708
- if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
709
- control_guidance_start = len(control_guidance_end) * [control_guidance_start]
710
- elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
711
- control_guidance_end = len(control_guidance_start) * [control_guidance_end]
712
- elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
713
- mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
714
- control_guidance_start, control_guidance_end = (
715
- mult * [control_guidance_start],
716
- mult * [control_guidance_end],
717
- )
718
-
719
- # 1. Check inputs. Raise error if not correct
720
- self.check_inputs(
721
- prompt,
722
- callback_steps,
723
- negative_prompt,
724
- prompt_embeds,
725
- negative_prompt_embeds,
726
- controlnet_conditioning_scale,
727
- control_guidance_start,
728
- control_guidance_end,
729
- )
730
-
731
- # 2. Define call parameters
732
- # Currently we only support 1 prompt
733
- if prompt is not None and isinstance(prompt, str):
734
- batch_size = 1
735
- elif prompt is not None and isinstance(prompt, list):
736
- assert False
737
- else:
738
- assert False
739
- num_images_per_prompt = 1
740
-
741
- device = self._execution_device
742
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
743
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
744
- # corresponds to doing no classifier free guidance.
745
- do_classifier_free_guidance = guidance_scale > 1.0
746
-
747
- if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
748
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
749
-
750
- global_pool_conditions = (
751
- controlnet.config.global_pool_conditions
752
- if isinstance(controlnet, ControlNetModel)
753
- else controlnet.nets[0].config.global_pool_conditions
754
- )
755
- guess_mode = guess_mode or global_pool_conditions
756
-
757
- # 3. Encode input prompt
758
- text_encoder_lora_scale = (
759
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
760
- )
761
- prompt_embeds = self._encode_prompt(
762
- prompt,
763
- device,
764
- num_images_per_prompt,
765
- do_classifier_free_guidance,
766
- negative_prompt,
767
- prompt_embeds=prompt_embeds,
768
- negative_prompt_embeds=negative_prompt_embeds,
769
- lora_scale=text_encoder_lora_scale,
770
- )
771
-
772
- # 4. Process the first frame
773
- height, width = None, None
774
- output_frames = []
775
- self.attn_state.reset()
776
-
777
- # 4.1 prepare frames
778
- image = self.image_processor.preprocess(frames[0]).to(dtype=torch.float32)
779
- first_image = image[0] # C, H, W
780
-
781
- # 4.2 Prepare controlnet_conditioning_image
782
- # Currently we only support single control
783
- if isinstance(controlnet, ControlNetModel):
784
- control_image = self.prepare_control_image(
785
- image=control_frames[0],
786
- width=width,
787
- height=height,
788
- batch_size=batch_size,
789
- num_images_per_prompt=1,
790
- device=device,
791
- dtype=controlnet.dtype,
792
- do_classifier_free_guidance=do_classifier_free_guidance,
793
- guess_mode=guess_mode,
794
- )
795
- else:
796
- assert False
797
-
798
- # 4.3 Prepare timesteps
799
- self.scheduler.set_timesteps(num_inference_steps, device=device)
800
- timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
801
- latent_timestep = timesteps[:1].repeat(batch_size)
802
-
803
- # 4.4 Prepare latent variables
804
- latents = self.prepare_latents(
805
- image,
806
- latent_timestep,
807
- batch_size,
808
- num_images_per_prompt,
809
- prompt_embeds.dtype,
810
- device,
811
- generator,
812
- )
813
-
814
- # 4.5 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
815
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
816
-
817
- # 4.6 Create tensor stating which controlnets to keep
818
- controlnet_keep = []
819
- for i in range(len(timesteps)):
820
- keeps = [
821
- 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
822
- for s, e in zip(control_guidance_start, control_guidance_end)
823
- ]
824
- controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
825
-
826
- first_x0_list = []
827
-
828
- # 4.7 Denoising loop
829
- num_warmup_steps = len(timesteps) - cur_num_inference_steps * self.scheduler.order
830
- with self.progress_bar(total=cur_num_inference_steps) as progress_bar:
831
- for i, t in enumerate(timesteps):
832
- self.attn_state.set_timestep(t.item())
833
-
834
- # expand the latents if we are doing classifier free guidance
835
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
836
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
837
-
838
- # controlnet(s) inference
839
- if guess_mode and do_classifier_free_guidance:
840
- # Infer ControlNet only for the conditional batch.
841
- control_model_input = latents
842
- control_model_input = self.scheduler.scale_model_input(control_model_input, t)
843
- controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
844
- else:
845
- control_model_input = latent_model_input
846
- controlnet_prompt_embeds = prompt_embeds
847
-
848
- if isinstance(controlnet_keep[i], list):
849
- cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
850
- else:
851
- controlnet_cond_scale = controlnet_conditioning_scale
852
- if isinstance(controlnet_cond_scale, list):
853
- controlnet_cond_scale = controlnet_cond_scale[0]
854
- cond_scale = controlnet_cond_scale * controlnet_keep[i]
855
-
856
- down_block_res_samples, mid_block_res_sample = self.controlnet(
857
- control_model_input,
858
- t,
859
- encoder_hidden_states=controlnet_prompt_embeds,
860
- controlnet_cond=control_image,
861
- conditioning_scale=cond_scale,
862
- guess_mode=guess_mode,
863
- return_dict=False,
864
- )
865
-
866
- if guess_mode and do_classifier_free_guidance:
867
- # Inferred ControlNet only for the conditional batch.
868
- # To apply the output of ControlNet to both the unconditional and conditional batches,
869
- # add 0 to the unconditional batch to keep it unchanged.
870
- down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
871
- mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
872
-
873
- # predict the noise residual
874
- noise_pred = self.unet(
875
- latent_model_input,
876
- t,
877
- encoder_hidden_states=prompt_embeds,
878
- cross_attention_kwargs=cross_attention_kwargs,
879
- down_block_additional_residuals=down_block_res_samples,
880
- mid_block_additional_residual=mid_block_res_sample,
881
- return_dict=False,
882
- )[0]
883
-
884
- # perform guidance
885
- if do_classifier_free_guidance:
886
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
887
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
888
-
889
- alpha_prod_t = self.scheduler.alphas_cumprod[t]
890
- beta_prod_t = 1 - alpha_prod_t
891
- pred_x0 = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
892
- first_x0 = pred_x0.detach()
893
- first_x0_list.append(first_x0)
894
-
895
- # compute the previous noisy sample x_t -> x_t-1
896
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
897
-
898
- # call the callback, if provided
899
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
900
- progress_bar.update()
901
- if callback is not None and i % callback_steps == 0:
902
- callback(i, t, latents)
903
-
904
- if not output_type == "latent":
905
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
906
- else:
907
- image = latents
908
-
909
- first_result = image
910
- prev_result = image
911
- do_denormalize = [True] * image.shape[0]
912
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
913
-
914
- output_frames.append(image[0])
915
-
916
- # 5. Process each frame
917
- for idx in range(1, len(frames)):
918
- image = frames[idx]
919
- prev_image = frames[idx - 1]
920
- control_image = control_frames[idx]
921
- # 5.1 prepare frames
922
- image = self.image_processor.preprocess(image).to(dtype=torch.float32)
923
- prev_image = self.image_processor.preprocess(prev_image).to(dtype=torch.float32)
924
-
925
- warped_0, bwd_occ_0, bwd_flow_0 = get_warped_and_mask(
926
- self.flow_model, first_image, image[0], first_result, False, self.device
927
- )
928
- blend_mask_0 = blur(F.max_pool2d(bwd_occ_0, kernel_size=9, stride=1, padding=4))
929
- blend_mask_0 = torch.clamp(blend_mask_0 + bwd_occ_0, 0, 1)
930
-
931
- warped_pre, bwd_occ_pre, bwd_flow_pre = get_warped_and_mask(
932
- self.flow_model, prev_image[0], image[0], prev_result, False, self.device
933
- )
934
- blend_mask_pre = blur(F.max_pool2d(bwd_occ_pre, kernel_size=9, stride=1, padding=4))
935
- blend_mask_pre = torch.clamp(blend_mask_pre + bwd_occ_pre, 0, 1)
936
-
937
- warp_mask = 1 - F.max_pool2d(blend_mask_0, kernel_size=8)
938
- warp_flow = F.interpolate(bwd_flow_0 / 8.0, scale_factor=1.0 / 8, mode="bilinear")
939
-
940
- # 5.2 Prepare controlnet_conditioning_image
941
- # Currently we only support single control
942
- if isinstance(controlnet, ControlNetModel):
943
- control_image = self.prepare_control_image(
944
- image=control_image,
945
- width=width,
946
- height=height,
947
- batch_size=batch_size,
948
- num_images_per_prompt=1,
949
- device=device,
950
- dtype=controlnet.dtype,
951
- do_classifier_free_guidance=do_classifier_free_guidance,
952
- guess_mode=guess_mode,
953
- )
954
- else:
955
- assert False
956
-
957
- # 5.3 Prepare timesteps
958
- self.scheduler.set_timesteps(num_inference_steps, device=device)
959
- timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
960
- latent_timestep = timesteps[:1].repeat(batch_size)
961
-
962
- skip_t = int(num_inference_steps * (1 - strength))
963
- warp_start_t = int(warp_start * num_inference_steps)
964
- warp_end_t = int(warp_end * num_inference_steps)
965
- mask_start_t = int(mask_start * num_inference_steps)
966
- mask_end_t = int(mask_end * num_inference_steps)
967
-
968
- # 5.4 Prepare latent variables
969
- init_latents = self.prepare_latents(
970
- image,
971
- latent_timestep,
972
- batch_size,
973
- num_images_per_prompt,
974
- prompt_embeds.dtype,
975
- device,
976
- generator,
977
- )
978
-
979
- # 5.5 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
980
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
981
-
982
- # 5.6 Create tensor stating which controlnets to keep
983
- controlnet_keep = []
984
- for i in range(len(timesteps)):
985
- keeps = [
986
- 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
987
- for s, e in zip(control_guidance_start, control_guidance_end)
988
- ]
989
- controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
990
-
991
- # 5.7 Denoising loop
992
- num_warmup_steps = len(timesteps) - cur_num_inference_steps * self.scheduler.order
993
-
994
- def denoising_loop(latents, mask=None, xtrg=None, noise_rescale=None):
995
- dir_xt = 0
996
- latents_dtype = latents.dtype
997
- with self.progress_bar(total=cur_num_inference_steps) as progress_bar:
998
- for i, t in enumerate(timesteps):
999
- self.attn_state.set_timestep(t.item())
1000
- if i + skip_t >= mask_start_t and i + skip_t <= mask_end_t and xtrg is not None:
1001
- rescale = torch.maximum(1.0 - mask, (1 - mask**2) ** 0.5 * inner_strength)
1002
- if noise_rescale is not None:
1003
- rescale = (1.0 - mask) * (1 - noise_rescale) + rescale * noise_rescale
1004
- noise = randn_tensor(xtrg.shape, generator=generator, device=device, dtype=xtrg.dtype)
1005
- latents_ref = self.scheduler.add_noise(xtrg, noise, t)
1006
- latents = latents_ref * mask + (1.0 - mask) * (latents - dir_xt) + rescale * dir_xt
1007
- latents = latents.to(latents_dtype)
1008
-
1009
- # expand the latents if we are doing classifier free guidance
1010
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1011
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1012
-
1013
- # controlnet(s) inference
1014
- if guess_mode and do_classifier_free_guidance:
1015
- # Infer ControlNet only for the conditional batch.
1016
- control_model_input = latents
1017
- control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1018
- controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1019
- else:
1020
- control_model_input = latent_model_input
1021
- controlnet_prompt_embeds = prompt_embeds
1022
-
1023
- if isinstance(controlnet_keep[i], list):
1024
- cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1025
- else:
1026
- controlnet_cond_scale = controlnet_conditioning_scale
1027
- if isinstance(controlnet_cond_scale, list):
1028
- controlnet_cond_scale = controlnet_cond_scale[0]
1029
- cond_scale = controlnet_cond_scale * controlnet_keep[i]
1030
- down_block_res_samples, mid_block_res_sample = self.controlnet(
1031
- control_model_input,
1032
- t,
1033
- encoder_hidden_states=controlnet_prompt_embeds,
1034
- controlnet_cond=control_image,
1035
- conditioning_scale=cond_scale,
1036
- guess_mode=guess_mode,
1037
- return_dict=False,
1038
- )
1039
-
1040
- if guess_mode and do_classifier_free_guidance:
1041
- # Inferred ControlNet only for the conditional batch.
1042
- # To apply the output of ControlNet to both the unconditional and conditional batches,
1043
- # add 0 to the unconditional batch to keep it unchanged.
1044
- down_block_res_samples = [
1045
- torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples
1046
- ]
1047
- mid_block_res_sample = torch.cat(
1048
- [torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
1049
- )
1050
-
1051
- # predict the noise residual
1052
- noise_pred = self.unet(
1053
- latent_model_input,
1054
- t,
1055
- encoder_hidden_states=prompt_embeds,
1056
- cross_attention_kwargs=cross_attention_kwargs,
1057
- down_block_additional_residuals=down_block_res_samples,
1058
- mid_block_additional_residual=mid_block_res_sample,
1059
- return_dict=False,
1060
- )[0]
1061
-
1062
- # perform guidance
1063
- if do_classifier_free_guidance:
1064
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1065
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1066
-
1067
- # Get pred_x0 from scheduler
1068
- alpha_prod_t = self.scheduler.alphas_cumprod[t]
1069
- beta_prod_t = 1 - alpha_prod_t
1070
- pred_x0 = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
1071
-
1072
- if i + skip_t >= warp_start_t and i + skip_t <= warp_end_t:
1073
- # warp x_0
1074
- pred_x0 = (
1075
- flow_warp(first_x0_list[i], warp_flow, mode="nearest") * warp_mask
1076
- + (1 - warp_mask) * pred_x0
1077
- )
1078
-
1079
- # get x_t from x_0
1080
- latents = self.scheduler.add_noise(pred_x0, noise_pred, t).to(latents_dtype)
1081
-
1082
- prev_t = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
1083
- if i == len(timesteps) - 1:
1084
- alpha_t_prev = 1.0
1085
- else:
1086
- alpha_t_prev = self.scheduler.alphas_cumprod[prev_t]
1087
-
1088
- dir_xt = (1.0 - alpha_t_prev) ** 0.5 * noise_pred
1089
-
1090
- # compute the previous noisy sample x_t -> x_t-1
1091
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[
1092
- 0
1093
- ]
1094
-
1095
- # call the callback, if provided
1096
- if i == len(timesteps) - 1 or (
1097
- (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
1098
- ):
1099
- progress_bar.update()
1100
- if callback is not None and i % callback_steps == 0:
1101
- callback(i, t, latents)
1102
-
1103
- return latents
1104
-
1105
- if mask_start_t <= mask_end_t:
1106
- self.attn_state.to_load()
1107
- else:
1108
- self.attn_state.to_load_and_store_prev()
1109
- latents = denoising_loop(init_latents)
1110
-
1111
- if mask_start_t <= mask_end_t:
1112
- direct_result = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1113
-
1114
- blend_results = (1 - blend_mask_pre) * warped_pre + blend_mask_pre * direct_result
1115
- blend_results = (1 - blend_mask_0) * warped_0 + blend_mask_0 * blend_results
1116
-
1117
- bwd_occ = 1 - torch.clamp(1 - bwd_occ_pre + 1 - bwd_occ_0, 0, 1)
1118
- blend_mask = blur(F.max_pool2d(bwd_occ, kernel_size=9, stride=1, padding=4))
1119
- blend_mask = 1 - torch.clamp(blend_mask + bwd_occ, 0, 1)
1120
-
1121
- blend_results = blend_results.to(latents.dtype)
1122
- xtrg = self.vae.encode(blend_results).latent_dist.sample(generator)
1123
- xtrg = self.vae.config.scaling_factor * xtrg
1124
- blend_results_rec = self.vae.decode(xtrg / self.vae.config.scaling_factor, return_dict=False)[0]
1125
- xtrg_rec = self.vae.encode(blend_results_rec).latent_dist.sample(generator)
1126
- xtrg_rec = self.vae.config.scaling_factor * xtrg_rec
1127
- xtrg_ = xtrg + (xtrg - xtrg_rec)
1128
- blend_results_rec_new = self.vae.decode(xtrg_ / self.vae.config.scaling_factor, return_dict=False)[0]
1129
- tmp = (abs(blend_results_rec_new - blend_results).mean(dim=1, keepdims=True) > 0.25).float()
1130
-
1131
- mask_x = F.max_pool2d(
1132
- (F.interpolate(tmp, scale_factor=1 / 8.0, mode="bilinear") > 0).float(),
1133
- kernel_size=3,
1134
- stride=1,
1135
- padding=1,
1136
- )
1137
-
1138
- mask = 1 - F.max_pool2d(1 - blend_mask, kernel_size=8) # * (1-mask_x)
1139
-
1140
- if smooth_boundary:
1141
- noise_rescale = find_flat_region(mask)
1142
- else:
1143
- noise_rescale = torch.ones_like(mask)
1144
-
1145
- xtrg = (xtrg + (1 - mask_x) * (xtrg - xtrg_rec)) * mask
1146
- xtrg = xtrg.to(latents.dtype)
1147
-
1148
- self.scheduler.set_timesteps(num_inference_steps, device=device)
1149
- timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
1150
-
1151
- self.attn_state.to_load_and_store_prev()
1152
- latents = denoising_loop(init_latents, mask * mask_strength, xtrg, noise_rescale)
1153
-
1154
- if not output_type == "latent":
1155
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1156
- else:
1157
- image = latents
1158
-
1159
- prev_result = image
1160
-
1161
- do_denormalize = [True] * image.shape[0]
1162
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1163
-
1164
- output_frames.append(image[0])
1165
-
1166
- # Offload last model to CPU
1167
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1168
- self.final_offload_hook.offload()
1169
-
1170
- if not return_dict:
1171
- return output_frames
1172
-
1173
- return TextToVideoSDPipelineOutput(frames=output_frames)
1174
-
1175
-
1176
- class InputPadder:
1177
- """Pads images such that dimensions are divisible by 8"""
1178
-
1179
- def __init__(self, dims, mode="sintel", padding_factor=8):
1180
- self.ht, self.wd = dims[-2:]
1181
- pad_ht = (((self.ht // padding_factor) + 1) * padding_factor - self.ht) % padding_factor
1182
- pad_wd = (((self.wd // padding_factor) + 1) * padding_factor - self.wd) % padding_factor
1183
- if mode == "sintel":
1184
- self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, pad_ht // 2, pad_ht - pad_ht // 2]
1185
- else:
1186
- self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht]
1187
-
1188
- def pad(self, *inputs):
1189
- return [F.pad(x, self._pad, mode="replicate") for x in inputs]
1190
-
1191
- def unpad(self, x):
1192
- ht, wd = x.shape[-2:]
1193
- c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]]
1194
- return x[..., c[0] : c[1], c[2] : c[3]]
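
The file above was distributed as a diffusers community pipeline script. The following is a minimal, illustrative sketch of how such a script was typically loaded and called, assuming it is still resolvable by name via `custom_pipeline` (or passed as a local path) and that the `gmflow` package it imports is installed; `frames`, `control_frames`, and the chosen ControlNet checkpoint are placeholders rather than values taken from this commit.

```py
# Illustrative sketch only, not part of the removed file. Assumes the community
# script (or a local copy of it) is resolvable via `custom_pipeline` and that
# the `gmflow` package imported by the script is installed.
import torch

from diffusers import ControlNetModel, DiffusionPipeline, UniPCMultistepScheduler

# Any ControlNet suited to the control images can be used; HED is one common choice.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed", torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    custom_pipeline="rerender_a_video",  # or a path to a local copy of the script
    torch_dtype=torch.float16,
).to("cuda")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# `frames` and `control_frames` are placeholders: lists of np.ndarray video frames
# and their per-frame control images, matching the __call__ signature shown above.
result = pipe(
    prompt="a watercolor painting of the scene",
    frames=frames,
    control_frames=control_frames,
    strength=0.75,
    num_inference_steps=20,
    controlnet_conditioning_scale=0.7,
    generator=torch.manual_seed(0),
)
video_frames = result.frames
```

The call arguments mirror the `__call__` signature in the diff; shape-aware and pixel-aware fusion are tuned through `warp_start`/`warp_end` and `mask_start`/`mask_end` in the same way.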
run_onnx_controlnet.py DELETED
@@ -1,911 +0,0 @@
1
- import argparse
2
- import inspect
3
- import os
4
- import time
5
- import warnings
6
- from typing import Any, Callable, Dict, List, Optional, Union
7
-
8
- import numpy as np
9
- import PIL.Image
10
- import torch
11
- from PIL import Image
12
- from transformers import CLIPTokenizer
13
-
14
- from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler
15
- from diffusers.image_processor import VaeImageProcessor
16
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline
17
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
18
- from diffusers.schedulers import KarrasDiffusionSchedulers
19
- from diffusers.utils import (
20
- deprecate,
21
- logging,
22
- replace_example_docstring,
23
- )
24
- from diffusers.utils.torch_utils import randn_tensor
25
-
26
-
27
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
28
-
29
-
30
- EXAMPLE_DOC_STRING = """
31
- Examples:
32
- ```py
33
- >>> # !pip install opencv-python transformers accelerate
34
- >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
35
- >>> from diffusers.utils import load_image
36
- >>> import numpy as np
37
- >>> import torch
38
-
39
- >>> import cv2
40
- >>> from PIL import Image
41
-
42
- >>> # download an image
43
- >>> image = load_image(
44
- ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
45
- ... )
46
- >>> np_image = np.array(image)
47
-
48
- >>> # get canny image
49
- >>> np_image = cv2.Canny(np_image, 100, 200)
50
- >>> np_image = np_image[:, :, None]
51
- >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)
52
- >>> canny_image = Image.fromarray(np_image)
53
-
54
- >>> # load control net and stable diffusion v1-5
55
- >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
56
- >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
57
- ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
58
- ... )
59
-
60
- >>> # speed up diffusion process with faster scheduler and memory optimization
61
- >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
62
- >>> pipe.enable_model_cpu_offload()
63
-
64
- >>> # generate image
65
- >>> generator = torch.manual_seed(0)
66
- >>> image = pipe(
67
- ... "futuristic-looking woman",
68
- ... num_inference_steps=20,
69
- ... generator=generator,
70
- ... image=image,
71
- ... control_image=canny_image,
72
- ... ).images[0]
73
- ```
74
- """
75
-
76
-
77
- def prepare_image(image):
78
- if isinstance(image, torch.Tensor):
79
- # Batch single image
80
- if image.ndim == 3:
81
- image = image.unsqueeze(0)
82
-
83
- image = image.to(dtype=torch.float32)
84
- else:
85
- # preprocess image
86
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
87
- image = [image]
88
-
89
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
90
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
91
- image = np.concatenate(image, axis=0)
92
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
93
- image = np.concatenate([i[None, :] for i in image], axis=0)
94
-
95
- image = image.transpose(0, 3, 1, 2)
96
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
97
-
98
- return image
99
-
100
-
101
- class OnnxStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
102
- vae_encoder: OnnxRuntimeModel
103
- vae_decoder: OnnxRuntimeModel
104
- text_encoder: OnnxRuntimeModel
105
- tokenizer: CLIPTokenizer
106
- unet: OnnxRuntimeModel
107
- scheduler: KarrasDiffusionSchedulers
108
-
109
- def __init__(
110
- self,
111
- vae_encoder: OnnxRuntimeModel,
112
- vae_decoder: OnnxRuntimeModel,
113
- text_encoder: OnnxRuntimeModel,
114
- tokenizer: CLIPTokenizer,
115
- unet: OnnxRuntimeModel,
116
- scheduler: KarrasDiffusionSchedulers,
117
- ):
118
- super().__init__()
119
-
120
- self.register_modules(
121
- vae_encoder=vae_encoder,
122
- vae_decoder=vae_decoder,
123
- text_encoder=text_encoder,
124
- tokenizer=tokenizer,
125
- unet=unet,
126
- scheduler=scheduler,
127
- )
128
- self.vae_scale_factor = 2 ** (4 - 1)
129
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
130
- self.control_image_processor = VaeImageProcessor(
131
- vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
132
- )
133
-
134
- def _encode_prompt(
135
- self,
136
- prompt: Union[str, List[str]],
137
- num_images_per_prompt: Optional[int],
138
- do_classifier_free_guidance: bool,
139
- negative_prompt: Optional[str],
140
- prompt_embeds: Optional[np.ndarray] = None,
141
- negative_prompt_embeds: Optional[np.ndarray] = None,
142
- ):
143
- r"""
144
- Encodes the prompt into text encoder hidden states.
145
-
146
- Args:
147
- prompt (`str` or `List[str]`):
148
- prompt to be encoded
149
- num_images_per_prompt (`int`):
150
- number of images that should be generated per prompt
151
- do_classifier_free_guidance (`bool`):
152
- whether to use classifier free guidance or not
153
- negative_prompt (`str` or `List[str]`):
154
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
155
- if `guidance_scale` is less than `1`).
156
- prompt_embeds (`np.ndarray`, *optional*):
157
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
158
- provided, text embeddings will be generated from `prompt` input argument.
159
- negative_prompt_embeds (`np.ndarray`, *optional*):
160
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
161
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
162
- argument.
163
- """
164
- if prompt is not None and isinstance(prompt, str):
165
- batch_size = 1
166
- elif prompt is not None and isinstance(prompt, list):
167
- batch_size = len(prompt)
168
- else:
169
- batch_size = prompt_embeds.shape[0]
170
-
171
- if prompt_embeds is None:
172
- # get prompt text embeddings
173
- text_inputs = self.tokenizer(
174
- prompt,
175
- padding="max_length",
176
- max_length=self.tokenizer.model_max_length,
177
- truncation=True,
178
- return_tensors="np",
179
- )
180
- text_input_ids = text_inputs.input_ids
181
- untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
182
-
183
- if not np.array_equal(text_input_ids, untruncated_ids):
184
- removed_text = self.tokenizer.batch_decode(
185
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
186
- )
187
- logger.warning(
188
- "The following part of your input was truncated because CLIP can only handle sequences up to"
189
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
190
- )
191
-
192
- prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
193
-
194
- prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
195
-
196
- # get unconditional embeddings for classifier free guidance
197
- if do_classifier_free_guidance and negative_prompt_embeds is None:
198
- uncond_tokens: List[str]
199
- if negative_prompt is None:
200
- uncond_tokens = [""] * batch_size
201
- elif type(prompt) is not type(negative_prompt):
202
- raise TypeError(
203
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
204
- f" {type(prompt)}."
205
- )
206
- elif isinstance(negative_prompt, str):
207
- uncond_tokens = [negative_prompt] * batch_size
208
- elif batch_size != len(negative_prompt):
209
- raise ValueError(
210
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
211
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
212
- " the batch size of `prompt`."
213
- )
214
- else:
215
- uncond_tokens = negative_prompt
216
-
217
- max_length = prompt_embeds.shape[1]
218
- uncond_input = self.tokenizer(
219
- uncond_tokens,
220
- padding="max_length",
221
- max_length=max_length,
222
- truncation=True,
223
- return_tensors="np",
224
- )
225
- negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
226
-
227
- if do_classifier_free_guidance:
228
- negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
229
-
230
- # For classifier free guidance, we need to do two forward passes.
231
- # Here we concatenate the unconditional and text embeddings into a single batch
232
- # to avoid doing two forward passes
233
- prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
234
-
235
- return prompt_embeds
236
-
237
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
238
- def decode_latents(self, latents):
239
- warnings.warn(
240
- "The decode_latents method is deprecated and will be removed in a future version. Please"
241
- " use VaeImageProcessor instead",
242
- FutureWarning,
243
- )
244
- latents = 1 / self.vae.config.scaling_factor * latents
245
- image = self.vae.decode(latents, return_dict=False)[0]
246
- image = (image / 2 + 0.5).clamp(0, 1)
247
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
248
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
249
- return image
250
-
251
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
252
- def prepare_extra_step_kwargs(self, generator, eta):
253
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
254
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
255
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
256
- # and should be between [0, 1]
257
-
258
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
259
- extra_step_kwargs = {}
260
- if accepts_eta:
261
- extra_step_kwargs["eta"] = eta
262
-
263
- # check if the scheduler accepts generator
264
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
265
- if accepts_generator:
266
- extra_step_kwargs["generator"] = generator
267
- return extra_step_kwargs
268
-
269
- def check_inputs(
270
- self,
271
- num_controlnet,
272
- prompt,
273
- image,
274
- callback_steps,
275
- negative_prompt=None,
276
- prompt_embeds=None,
277
- negative_prompt_embeds=None,
278
- controlnet_conditioning_scale=1.0,
279
- control_guidance_start=0.0,
280
- control_guidance_end=1.0,
281
- ):
282
- if (callback_steps is None) or (
283
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
284
- ):
285
- raise ValueError(
286
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
287
- f" {type(callback_steps)}."
288
- )
289
-
290
- if prompt is not None and prompt_embeds is not None:
291
- raise ValueError(
292
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
293
- " only forward one of the two."
294
- )
295
- elif prompt is None and prompt_embeds is None:
296
- raise ValueError(
297
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
298
- )
299
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
300
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
301
-
302
- if negative_prompt is not None and negative_prompt_embeds is not None:
303
- raise ValueError(
304
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
305
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
306
- )
307
-
308
- if prompt_embeds is not None and negative_prompt_embeds is not None:
309
- if prompt_embeds.shape != negative_prompt_embeds.shape:
310
- raise ValueError(
311
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
312
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
313
- f" {negative_prompt_embeds.shape}."
314
- )
315
-
316
- # Check `image`
317
- if num_controlnet == 1:
318
- self.check_image(image, prompt, prompt_embeds)
319
- elif num_controlnet > 1:
320
- if not isinstance(image, list):
321
- raise TypeError("For multiple controlnets: `image` must be type `list`")
322
-
323
- # When `image` is a nested list:
324
- # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
325
- elif any(isinstance(i, list) for i in image):
326
- raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
327
- elif len(image) != num_controlnet:
328
- raise ValueError(
329
- f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets."
330
- )
331
-
332
- for image_ in image:
333
- self.check_image(image_, prompt, prompt_embeds)
334
- else:
335
- assert False
336
-
337
- # Check `controlnet_conditioning_scale`
338
- if num_controlnet == 1:
339
- if not isinstance(controlnet_conditioning_scale, float):
340
- raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
341
- elif num_controlnet > 1:
342
- if isinstance(controlnet_conditioning_scale, list):
343
- if any(isinstance(i, list) for i in controlnet_conditioning_scale):
344
- raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
345
- elif (
346
- isinstance(controlnet_conditioning_scale, list)
347
- and len(controlnet_conditioning_scale) != num_controlnet
348
- ):
349
- raise ValueError(
350
- "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
351
- " the same length as the number of controlnets"
352
- )
353
- else:
354
- assert False
355
-
356
- if len(control_guidance_start) != len(control_guidance_end):
357
- raise ValueError(
358
- f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
359
- )
360
-
361
- if num_controlnet > 1:
362
- if len(control_guidance_start) != num_controlnet:
363
- raise ValueError(
364
- f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}."
365
- )
366
-
367
- for start, end in zip(control_guidance_start, control_guidance_end):
368
- if start >= end:
369
- raise ValueError(
370
- f"control guidance start: {start} cannot be larger than or equal to control guidance end: {end}."
371
- )
372
- if start < 0.0:
373
- raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
374
- if end > 1.0:
375
- raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
376
-
377
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
378
- def check_image(self, image, prompt, prompt_embeds):
379
- image_is_pil = isinstance(image, PIL.Image.Image)
380
- image_is_tensor = isinstance(image, torch.Tensor)
381
- image_is_np = isinstance(image, np.ndarray)
382
- image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
383
- image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
384
- image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
385
-
386
- if (
387
- not image_is_pil
388
- and not image_is_tensor
389
- and not image_is_np
390
- and not image_is_pil_list
391
- and not image_is_tensor_list
392
- and not image_is_np_list
393
- ):
394
- raise TypeError(
395
- f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
396
- )
397
-
398
- if image_is_pil:
399
- image_batch_size = 1
400
- else:
401
- image_batch_size = len(image)
402
-
403
- if prompt is not None and isinstance(prompt, str):
404
- prompt_batch_size = 1
405
- elif prompt is not None and isinstance(prompt, list):
406
- prompt_batch_size = len(prompt)
407
- elif prompt_embeds is not None:
408
- prompt_batch_size = prompt_embeds.shape[0]
409
-
410
- if image_batch_size != 1 and image_batch_size != prompt_batch_size:
411
- raise ValueError(
412
- f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
413
- )
414
-
415
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
416
- def prepare_control_image(
417
- self,
418
- image,
419
- width,
420
- height,
421
- batch_size,
422
- num_images_per_prompt,
423
- device,
424
- dtype,
425
- do_classifier_free_guidance=False,
426
- guess_mode=False,
427
- ):
428
- image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
429
- image_batch_size = image.shape[0]
430
-
431
- if image_batch_size == 1:
432
- repeat_by = batch_size
433
- else:
434
- # image batch size is the same as prompt batch size
435
- repeat_by = num_images_per_prompt
436
-
437
- image = image.repeat_interleave(repeat_by, dim=0)
438
-
439
- image = image.to(device=device, dtype=dtype)
440
-
441
- if do_classifier_free_guidance and not guess_mode:
442
- image = torch.cat([image] * 2)
443
-
444
- return image
445
-
446
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
447
- def get_timesteps(self, num_inference_steps, strength, device):
448
- # get the original timestep using init_timestep
449
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
450
-
451
- t_start = max(num_inference_steps - init_timestep, 0)
452
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
453
-
454
- return timesteps, num_inference_steps - t_start
455
-
456
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
457
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
458
- raise ValueError(
459
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
460
- )
461
-
462
- image = image.to(device=device, dtype=dtype)
463
-
464
- batch_size = batch_size * num_images_per_prompt
465
-
466
- if image.shape[1] == 4:
467
- init_latents = image
468
-
469
- else:
470
- _image = image.cpu().detach().numpy()
471
- init_latents = self.vae_encoder(sample=_image)[0]
472
- init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype)
473
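- # 0.18215 is the latent scaling factor of the Stable Diffusion v1.x VAE (vae.config.scaling_factor)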
- init_latents = 0.18215 * init_latents
474
-
475
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
476
- # expand init_latents for batch_size
477
- deprecation_message = (
478
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
479
- " images (`image`). Initial images are now duplicated to match the number of text prompts. Note"
480
- " that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update"
481
- " your script to pass as many initial images as text prompts to suppress this warning."
482
- )
483
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
484
- additional_image_per_prompt = batch_size // init_latents.shape[0]
485
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
486
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
487
- raise ValueError(
488
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
489
- )
490
- else:
491
- init_latents = torch.cat([init_latents], dim=0)
492
-
493
- shape = init_latents.shape
494
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
495
-
496
- # get latents
497
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
498
- latents = init_latents
499
-
500
- return latents
501
-
502
- @torch.no_grad()
503
- @replace_example_docstring(EXAMPLE_DOC_STRING)
504
- def __call__(
505
- self,
506
- num_controlnet: int,
507
- fp16: bool = True,
508
- prompt: Union[str, List[str]] = None,
509
- image: Union[
510
- torch.Tensor,
511
- PIL.Image.Image,
512
- np.ndarray,
513
- List[torch.Tensor],
514
- List[PIL.Image.Image],
515
- List[np.ndarray],
516
- ] = None,
517
- control_image: Union[
518
- torch.Tensor,
519
- PIL.Image.Image,
520
- np.ndarray,
521
- List[torch.Tensor],
522
- List[PIL.Image.Image],
523
- List[np.ndarray],
524
- ] = None,
525
- height: Optional[int] = None,
526
- width: Optional[int] = None,
527
- strength: float = 0.8,
528
- num_inference_steps: int = 50,
529
- guidance_scale: float = 7.5,
530
- negative_prompt: Optional[Union[str, List[str]]] = None,
531
- num_images_per_prompt: Optional[int] = 1,
532
- eta: float = 0.0,
533
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
534
- latents: Optional[torch.Tensor] = None,
535
- prompt_embeds: Optional[torch.Tensor] = None,
536
- negative_prompt_embeds: Optional[torch.Tensor] = None,
537
- output_type: Optional[str] = "pil",
538
- return_dict: bool = True,
539
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
540
- callback_steps: int = 1,
541
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
542
- controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
543
- guess_mode: bool = False,
544
- control_guidance_start: Union[float, List[float]] = 0.0,
545
- control_guidance_end: Union[float, List[float]] = 1.0,
546
- ):
547
- r"""
548
- Function invoked when calling the pipeline for generation.
549
-
550
- Args:
551
- prompt (`str` or `List[str]`, *optional*):
552
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
553
- instead.
554
- image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
555
- `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
556
- The initial image will be used as the starting point for the image generation process. Can also accept
557
- image latents as `image`; if latents are passed directly, they will not be encoded again.
558
- control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
559
- `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
560
- The ControlNet input condition. ControlNet uses this input condition to generate guidance for the UNet. If
561
- the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
562
- also be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If
563
- height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
564
- specified in init, images must be passed as a list such that each element of the list can be correctly
565
- batched for input to a single controlnet.
566
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
567
- The height in pixels of the generated image.
568
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
569
- The width in pixels of the generated image.
570
- num_inference_steps (`int`, *optional*, defaults to 50):
571
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
572
- expense of slower inference.
573
- guidance_scale (`float`, *optional*, defaults to 7.5):
574
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
575
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
576
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
577
- 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
578
- usually at the expense of lower image quality.
579
- negative_prompt (`str` or `List[str]`, *optional*):
580
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
581
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
582
- less than `1`).
583
- num_images_per_prompt (`int`, *optional*, defaults to 1):
584
- The number of images to generate per prompt.
585
- eta (`float`, *optional*, defaults to 0.0):
586
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
587
- [`schedulers.DDIMScheduler`] and is ignored for other schedulers.
588
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
589
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
590
- to make generation deterministic.
591
- latents (`torch.Tensor`, *optional*):
592
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
593
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
594
- tensor will be generated by sampling using the supplied random `generator`.
595
- prompt_embeds (`torch.Tensor`, *optional*):
596
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
597
- provided, text embeddings will be generated from `prompt` input argument.
598
- negative_prompt_embeds (`torch.Tensor`, *optional*):
599
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
600
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
601
- argument.
602
- output_type (`str`, *optional*, defaults to `"pil"`):
603
- The output format of the generated image. Choose between
604
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
605
- return_dict (`bool`, *optional*, defaults to `True`):
606
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
607
- plain tuple.
608
- callback (`Callable`, *optional*):
609
- A function that will be called every `callback_steps` steps during inference. The function will be
610
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
611
- callback_steps (`int`, *optional*, defaults to 1):
612
- The frequency at which the `callback` function will be called. If not specified, the callback will be
613
- called at every step.
614
- cross_attention_kwargs (`dict`, *optional*):
615
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
616
- `self.processor` in
617
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
618
- controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
619
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
620
- to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
621
- corresponding scale as a list. Note that this pipeline uses a smaller default conditioning scale (0.8)
622
- than [`~StableDiffusionControlNetPipeline.__call__`] does.
623
- guess_mode (`bool`, *optional*, defaults to `False`):
624
- In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
625
- you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
626
- control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
627
- The percentage of total steps at which the controlnet starts applying.
628
- control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
629
- The percentage of total steps at which the controlnet stops applying.
630
-
631
- Examples:
632
-
633
- Returns:
634
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
635
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
636
- When returning a tuple, the first element is a list with the generated images, and the second element is a
637
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
638
- (nsfw) content, according to the `safety_checker`.
639
- """
640
- if fp16:
641
- torch_dtype = torch.float16
642
- np_dtype = np.float16
643
- else:
644
- torch_dtype = torch.float32
645
- np_dtype = np.float32
646
-
647
- # align format for control guidance
648
- if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
649
- control_guidance_start = len(control_guidance_end) * [control_guidance_start]
650
- elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
651
- control_guidance_end = len(control_guidance_start) * [control_guidance_end]
652
- elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
653
- mult = num_controlnet
654
- control_guidance_start, control_guidance_end = (
655
- mult * [control_guidance_start],
656
- mult * [control_guidance_end],
657
- )
658
-
659
- # 1. Check inputs. Raise error if not correct
660
- self.check_inputs(
661
- num_controlnet,
662
- prompt,
663
- control_image,
664
- callback_steps,
665
- negative_prompt,
666
- prompt_embeds,
667
- negative_prompt_embeds,
668
- controlnet_conditioning_scale,
669
- control_guidance_start,
670
- control_guidance_end,
671
- )
672
-
673
- # 2. Define call parameters
674
- if prompt is not None and isinstance(prompt, str):
675
- batch_size = 1
676
- elif prompt is not None and isinstance(prompt, list):
677
- batch_size = len(prompt)
678
- else:
679
- batch_size = prompt_embeds.shape[0]
680
-
681
- device = self._execution_device
682
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
683
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
684
- # corresponds to doing no classifier free guidance.
685
- do_classifier_free_guidance = guidance_scale > 1.0
686
-
687
- if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float):
688
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet
689
-
690
- # 3. Encode input prompt
691
- prompt_embeds = self._encode_prompt(
692
- prompt,
693
- num_images_per_prompt,
694
- do_classifier_free_guidance,
695
- negative_prompt,
696
- prompt_embeds=prompt_embeds,
697
- negative_prompt_embeds=negative_prompt_embeds,
698
- )
699
- # 4. Prepare image
700
- image = self.image_processor.preprocess(image).to(dtype=torch.float32)
701
-
702
- # 5. Prepare controlnet_conditioning_image
703
- if num_controlnet == 1:
704
- control_image = self.prepare_control_image(
705
- image=control_image,
706
- width=width,
707
- height=height,
708
- batch_size=batch_size * num_images_per_prompt,
709
- num_images_per_prompt=num_images_per_prompt,
710
- device=device,
711
- dtype=torch_dtype,
712
- do_classifier_free_guidance=do_classifier_free_guidance,
713
- guess_mode=guess_mode,
714
- )
715
- elif num_controlnet > 1:
716
- control_images = []
717
-
718
- for control_image_ in control_image:
719
- control_image_ = self.prepare_control_image(
720
- image=control_image_,
721
- width=width,
722
- height=height,
723
- batch_size=batch_size * num_images_per_prompt,
724
- num_images_per_prompt=num_images_per_prompt,
725
- device=device,
726
- dtype=torch_dtype,
727
- do_classifier_free_guidance=do_classifier_free_guidance,
728
- guess_mode=guess_mode,
729
- )
730
-
731
- control_images.append(control_image_)
732
-
733
- control_image = control_images
734
- else:
735
- assert False
736
-
737
- # 5. Prepare timesteps
738
- self.scheduler.set_timesteps(num_inference_steps, device=device)
739
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
740
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
741
-
742
- # 6. Prepare latent variables
743
- latents = self.prepare_latents(
744
- image,
745
- latent_timestep,
746
- batch_size,
747
- num_images_per_prompt,
748
- torch_dtype,
749
- device,
750
- generator,
751
- )
752
-
753
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
754
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
755
-
756
- # 7.1 Create tensor stating which controlnets to keep
757
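- # For every timestep, keeps[j] is 1.0 while the step fraction lies inside
- # [control_guidance_start[j], control_guidance_end[j]] and 0.0 otherwise, so each
- # ControlNet only contributes within its requested guidance window.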
- controlnet_keep = []
758
- for i in range(len(timesteps)):
759
- keeps = [
760
- 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
761
- for s, e in zip(control_guidance_start, control_guidance_end)
762
- ]
763
- controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps)
764
-
765
- # 8. Denoising loop
766
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
767
- with self.progress_bar(total=num_inference_steps) as progress_bar:
768
- for i, t in enumerate(timesteps):
769
- # expand the latents if we are doing classifier free guidance
770
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
771
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
772
-
773
- if isinstance(controlnet_keep[i], list):
774
- cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
775
- else:
776
- controlnet_cond_scale = controlnet_conditioning_scale
777
- if isinstance(controlnet_cond_scale, list):
778
- controlnet_cond_scale = controlnet_cond_scale[0]
779
- cond_scale = controlnet_cond_scale * controlnet_keep[i]
780
-
781
- # predict the noise residual
782
- _latent_model_input = latent_model_input.cpu().detach().numpy()
783
- _prompt_embeds = np.array(prompt_embeds, dtype=np_dtype)
784
- _t = np.array([t.cpu().detach().numpy()], dtype=np_dtype)
785
-
786
- if num_controlnet == 1:
787
- control_images = np.array([control_image], dtype=np_dtype)
788
- else:
789
- control_images = []
790
- for _control_img in control_image:
791
- _control_img = _control_img.cpu().detach().numpy()
792
- control_images.append(_control_img)
793
- control_images = np.array(control_images, dtype=np_dtype)
794
-
795
- control_scales = np.array(cond_scale, dtype=np_dtype)
796
- control_scales = np.resize(control_scales, (num_controlnet, 1))
797
-
798
- noise_pred = self.unet(
799
- sample=_latent_model_input,
800
- timestep=_t,
801
- encoder_hidden_states=_prompt_embeds,
802
- controlnet_conds=control_images,
803
- conditioning_scales=control_scales,
804
- )[0]
805
- noise_pred = torch.from_numpy(noise_pred).to(device)
806
-
807
- # perform guidance
808
- if do_classifier_free_guidance:
809
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
810
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
811
-
812
- # compute the previous noisy sample x_t -> x_t-1
813
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
814
-
815
- # call the callback, if provided
816
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
817
- progress_bar.update()
818
- if callback is not None and i % callback_steps == 0:
819
- step_idx = i // getattr(self.scheduler, "order", 1)
820
- callback(step_idx, t, latents)
821
-
822
- if not output_type == "latent":
823
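- # undo the 0.18215 VAE scaling applied in prepare_latents before running the ONNX VAE decoder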
- _latents = latents.cpu().detach().numpy() / 0.18215
824
- _latents = np.array(_latents, dtype=np_dtype)
825
- image = self.vae_decoder(latent_sample=_latents)[0]
826
- image = torch.from_numpy(image).to(device, dtype=torch.float32)
827
- has_nsfw_concept = None
828
- else:
829
- image = latents
830
- has_nsfw_concept = None
831
-
832
- if has_nsfw_concept is None:
833
- do_denormalize = [True] * image.shape[0]
834
- else:
835
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
836
-
837
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
838
-
839
- if not return_dict:
840
- return (image, has_nsfw_concept)
841
-
842
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
843
-
844
-
845
- if __name__ == "__main__":
846
- parser = argparse.ArgumentParser()
847
-
848
- parser.add_argument(
849
- "--sd_model",
850
- type=str,
851
- required=True,
852
- help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
853
- )
854
-
855
- parser.add_argument(
856
- "--onnx_model_dir",
857
- type=str,
858
- required=True,
859
- help="Path to the ONNX directory",
860
- )
861
-
862
- parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image")
863
-
864
- args = parser.parse_args()
865
-
866
- qr_image = Image.open(args.qr_img_path)
867
- qr_image = qr_image.resize((512, 512))
868
-
869
- # init stable diffusion pipeline
870
- pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model)
871
- pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
872
-
873
- provider = ["CUDAExecutionProvider", "CPUExecutionProvider"]
874
- onnx_pipeline = OnnxStableDiffusionControlNetImg2ImgPipeline(
875
- vae_encoder=OnnxRuntimeModel.from_pretrained(
876
- os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider
877
- ),
878
- vae_decoder=OnnxRuntimeModel.from_pretrained(
879
- os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider
880
- ),
881
- text_encoder=OnnxRuntimeModel.from_pretrained(
882
- os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider
883
- ),
884
- tokenizer=pipeline.tokenizer,
885
- unet=OnnxRuntimeModel.from_pretrained(os.path.join(args.onnx_model_dir, "unet"), provider=provider),
886
- scheduler=pipeline.scheduler,
887
- )
888
- onnx_pipeline = onnx_pipeline.to("cuda")
889
-
890
- prompt = "a cute cat fly to the moon"
891
- negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect"
892
-
893
- for i in range(10):
894
- start_time = time.time()
895
- image = onnx_pipeline(
896
- num_controlnet=2,
897
- prompt=prompt,
898
- negative_prompt=negative_prompt,
899
- image=qr_image,
900
- control_image=[qr_image, qr_image],
901
- width=512,
902
- height=512,
903
- strength=0.75,
904
- num_inference_steps=20,
905
- num_images_per_prompt=1,
906
- controlnet_conditioning_scale=[0.8, 0.8],
907
- control_guidance_start=[0.3, 0.3],
908
- control_guidance_end=[0.9, 0.9],
909
- ).images[0]
910
- print(time.time() - start_time)
911
- image.save("output_qr_code.png")
run_tensorrt_controlnet.py DELETED
@@ -1,1022 +0,0 @@
1
- import argparse
2
- import atexit
3
- import inspect
4
- import os
5
- import time
6
- import warnings
7
- from typing import Any, Callable, Dict, List, Optional, Union
8
-
9
- import numpy as np
10
- import PIL.Image
11
- import pycuda.driver as cuda
12
- import tensorrt as trt
13
- import torch
14
- from PIL import Image
15
- from pycuda.tools import make_default_context
16
- from transformers import CLIPTokenizer
17
-
18
- from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler
19
- from diffusers.image_processor import VaeImageProcessor
20
- from diffusers.pipelines.pipeline_utils import DiffusionPipeline
21
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
22
- from diffusers.schedulers import KarrasDiffusionSchedulers
23
- from diffusers.utils import (
24
- deprecate,
25
- logging,
26
- replace_example_docstring,
27
- )
28
- from diffusers.utils.torch_utils import randn_tensor
29
-
30
-
31
- # Initialize CUDA
32
- cuda.init()
33
- context = make_default_context()
34
- device = context.get_device()
35
- atexit.register(context.pop)
36
-
37
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
38
-
39
-
40
- def load_engine(trt_runtime, engine_path):
41
- with open(engine_path, "rb") as f:
42
- engine_data = f.read()
43
- engine = trt_runtime.deserialize_cuda_engine(engine_data)
44
- return engine
45
-
46
-
47
- class TensorRTModel:
48
- def __init__(
49
- self,
50
- trt_engine_path,
51
- **kwargs,
52
- ):
53
- cuda.init()
54
- stream = cuda.Stream()
55
- TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
56
- trt.init_libnvinfer_plugins(TRT_LOGGER, "")
57
- trt_runtime = trt.Runtime(TRT_LOGGER)
58
- engine = load_engine(trt_runtime, trt_engine_path)
59
- context = engine.create_execution_context()
60
-
61
- # allocates memory for network inputs/outputs on both CPU and GPU
62
- host_inputs = []
63
- cuda_inputs = []
64
- host_outputs = []
65
- cuda_outputs = []
66
- bindings = []
67
- input_names = []
68
- output_names = []
69
-
70
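- # NOTE: buffers are sized from the engine's binding shapes, so this assumes an engine built
- # with static shapes matching the batch size and resolution used at inference time.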
- for binding in engine:
71
- datatype = engine.get_binding_dtype(binding)
72
- if datatype == trt.DataType.HALF:
73
- dtype = np.float16
74
- else:
75
- dtype = np.float32
76
-
77
- shape = tuple(engine.get_binding_shape(binding))
78
- host_mem = cuda.pagelocked_empty(shape, dtype)
79
- cuda_mem = cuda.mem_alloc(host_mem.nbytes)
80
- bindings.append(int(cuda_mem))
81
-
82
- if engine.binding_is_input(binding):
83
- host_inputs.append(host_mem)
84
- cuda_inputs.append(cuda_mem)
85
- input_names.append(binding)
86
- else:
87
- host_outputs.append(host_mem)
88
- cuda_outputs.append(cuda_mem)
89
- output_names.append(binding)
90
-
91
- self.stream = stream
92
- self.context = context
93
- self.engine = engine
94
-
95
- self.host_inputs = host_inputs
96
- self.cuda_inputs = cuda_inputs
97
- self.host_outputs = host_outputs
98
- self.cuda_outputs = cuda_outputs
99
- self.bindings = bindings
100
- self.batch_size = engine.max_batch_size
101
-
102
- self.input_names = input_names
103
- self.output_names = output_names
104
-
105
- def __call__(self, **kwargs):
106
- context = self.context
107
- stream = self.stream
108
- bindings = self.bindings
109
-
110
- host_inputs = self.host_inputs
111
- cuda_inputs = self.cuda_inputs
112
- host_outputs = self.host_outputs
113
- cuda_outputs = self.cuda_outputs
114
-
115
- for idx, input_name in enumerate(self.input_names):
116
- _input = kwargs[input_name]
117
- np.copyto(host_inputs[idx], _input)
118
- # transfer input data to the GPU
119
- cuda.memcpy_htod_async(cuda_inputs[idx], host_inputs[idx], stream)
120
-
121
- context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
122
-
123
- result = {}
124
- for idx, output_name in enumerate(self.output_names):
125
- # transfer predictions back from the GPU
126
- cuda.memcpy_dtoh_async(host_outputs[idx], cuda_outputs[idx], stream)
127
- result[output_name] = host_outputs[idx]
128
-
129
- stream.synchronize()
130
-
131
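- # NOTE: the returned arrays are the reusable page-locked host buffers allocated in
- # __init__; copy them if the outputs must survive a subsequent call to this engine.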
- return result
132
-
133
-
134
- EXAMPLE_DOC_STRING = """
135
- Examples:
136
- ```py
137
- >>> # !pip install opencv-python transformers accelerate
138
- >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
139
- >>> from diffusers.utils import load_image
140
- >>> import numpy as np
141
- >>> import torch
142
-
143
- >>> import cv2
144
- >>> from PIL import Image
145
-
146
- >>> # download an image
147
- >>> image = load_image(
148
- ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
149
- ... )
150
- >>> np_image = np.array(image)
151
-
152
- >>> # get canny image
153
- >>> np_image = cv2.Canny(np_image, 100, 200)
154
- >>> np_image = np_image[:, :, None]
155
- >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)
156
- >>> canny_image = Image.fromarray(np_image)
157
-
158
- >>> # load control net and stable diffusion v1-5
159
- >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
160
- >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
161
- ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
162
- ... )
163
-
164
- >>> # speed up diffusion process with faster scheduler and memory optimization
165
- >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
166
- >>> pipe.enable_model_cpu_offload()
167
-
168
- >>> # generate image
169
- >>> generator = torch.manual_seed(0)
170
- >>> image = pipe(
171
- ... "futuristic-looking woman",
172
- ... num_inference_steps=20,
173
- ... generator=generator,
174
- ... image=image,
175
- ... control_image=canny_image,
176
- ... ).images[0]
177
- ```
178
- """
179
-
180
-
181
- def prepare_image(image):
182
- if isinstance(image, torch.Tensor):
183
- # Batch single image
184
- if image.ndim == 3:
185
- image = image.unsqueeze(0)
186
-
187
- image = image.to(dtype=torch.float32)
188
- else:
189
- # preprocess image
190
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
191
- image = [image]
192
-
193
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
194
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
195
- image = np.concatenate(image, axis=0)
196
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
197
- image = np.concatenate([i[None, :] for i in image], axis=0)
198
-
199
- image = image.transpose(0, 3, 1, 2)
200
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
201
-
202
- return image
203
-
204
-
205
- class TensorRTStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
206
- vae_encoder: OnnxRuntimeModel
207
- vae_decoder: OnnxRuntimeModel
208
- text_encoder: OnnxRuntimeModel
209
- tokenizer: CLIPTokenizer
210
- unet: TensorRTModel
211
- scheduler: KarrasDiffusionSchedulers
212
-
213
- def __init__(
214
- self,
215
- vae_encoder: OnnxRuntimeModel,
216
- vae_decoder: OnnxRuntimeModel,
217
- text_encoder: OnnxRuntimeModel,
218
- tokenizer: CLIPTokenizer,
219
- unet: TensorRTModel,
220
- scheduler: KarrasDiffusionSchedulers,
221
- ):
222
- super().__init__()
223
-
224
- self.register_modules(
225
- vae_encoder=vae_encoder,
226
- vae_decoder=vae_decoder,
227
- text_encoder=text_encoder,
228
- tokenizer=tokenizer,
229
- unet=unet,
230
- scheduler=scheduler,
231
- )
232
- self.vae_scale_factor = 2 ** (4 - 1)
233
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
234
- self.control_image_processor = VaeImageProcessor(
235
- vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
236
- )
237
-
238
- def _encode_prompt(
239
- self,
240
- prompt: Union[str, List[str]],
241
- num_images_per_prompt: Optional[int],
242
- do_classifier_free_guidance: bool,
243
- negative_prompt: Optional[str],
244
- prompt_embeds: Optional[np.ndarray] = None,
245
- negative_prompt_embeds: Optional[np.ndarray] = None,
246
- ):
247
- r"""
248
- Encodes the prompt into text encoder hidden states.
249
-
250
- Args:
251
- prompt (`str` or `List[str]`):
252
- prompt to be encoded
253
- num_images_per_prompt (`int`):
254
- number of images that should be generated per prompt
255
- do_classifier_free_guidance (`bool`):
256
- whether to use classifier free guidance or not
257
- negative_prompt (`str` or `List[str]`):
258
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
259
- if `guidance_scale` is less than `1`).
260
- prompt_embeds (`np.ndarray`, *optional*):
261
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
262
- provided, text embeddings will be generated from `prompt` input argument.
263
- negative_prompt_embeds (`np.ndarray`, *optional*):
264
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
265
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
266
- argument.
267
- """
268
- if prompt is not None and isinstance(prompt, str):
269
- batch_size = 1
270
- elif prompt is not None and isinstance(prompt, list):
271
- batch_size = len(prompt)
272
- else:
273
- batch_size = prompt_embeds.shape[0]
274
-
275
- if prompt_embeds is None:
276
- # get prompt text embeddings
277
- text_inputs = self.tokenizer(
278
- prompt,
279
- padding="max_length",
280
- max_length=self.tokenizer.model_max_length,
281
- truncation=True,
282
- return_tensors="np",
283
- )
284
- text_input_ids = text_inputs.input_ids
285
- untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
286
-
287
- if not np.array_equal(text_input_ids, untruncated_ids):
288
- removed_text = self.tokenizer.batch_decode(
289
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
290
- )
291
- logger.warning(
292
- "The following part of your input was truncated because CLIP can only handle sequences up to"
293
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
294
- )
295
-
296
- prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
297
-
298
- prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
299
-
300
- # get unconditional embeddings for classifier free guidance
301
- if do_classifier_free_guidance and negative_prompt_embeds is None:
302
- uncond_tokens: List[str]
303
- if negative_prompt is None:
304
- uncond_tokens = [""] * batch_size
305
- elif type(prompt) is not type(negative_prompt):
306
- raise TypeError(
307
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
308
- f" {type(prompt)}."
309
- )
310
- elif isinstance(negative_prompt, str):
311
- uncond_tokens = [negative_prompt] * batch_size
312
- elif batch_size != len(negative_prompt):
313
- raise ValueError(
314
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
315
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
316
- " the batch size of `prompt`."
317
- )
318
- else:
319
- uncond_tokens = negative_prompt
320
-
321
- max_length = prompt_embeds.shape[1]
322
- uncond_input = self.tokenizer(
323
- uncond_tokens,
324
- padding="max_length",
325
- max_length=max_length,
326
- truncation=True,
327
- return_tensors="np",
328
- )
329
- negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
330
-
331
- if do_classifier_free_guidance:
332
- negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
333
-
334
- # For classifier free guidance, we need to do two forward passes.
335
- # Here we concatenate the unconditional and text embeddings into a single batch
336
- # to avoid doing two forward passes
337
- prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
338
-
339
- return prompt_embeds
340
-
341
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
342
- def decode_latents(self, latents):
343
- warnings.warn(
344
- "The decode_latents method is deprecated and will be removed in a future version. Please"
345
- " use VaeImageProcessor instead",
346
- FutureWarning,
347
- )
348
- latents = 1 / self.vae.config.scaling_factor * latents
349
- image = self.vae.decode(latents, return_dict=False)[0]
350
- image = (image / 2 + 0.5).clamp(0, 1)
351
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
352
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
353
- return image
354
-
355
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
356
- def prepare_extra_step_kwargs(self, generator, eta):
357
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
358
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
359
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
360
- # and should be between [0, 1]
361
-
362
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
363
- extra_step_kwargs = {}
364
- if accepts_eta:
365
- extra_step_kwargs["eta"] = eta
366
-
367
- # check if the scheduler accepts generator
368
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
369
- if accepts_generator:
370
- extra_step_kwargs["generator"] = generator
371
- return extra_step_kwargs
372
-
373
- def check_inputs(
374
- self,
375
- num_controlnet,
376
- prompt,
377
- image,
378
- callback_steps,
379
- negative_prompt=None,
380
- prompt_embeds=None,
381
- negative_prompt_embeds=None,
382
- controlnet_conditioning_scale=1.0,
383
- control_guidance_start=0.0,
384
- control_guidance_end=1.0,
385
- ):
386
- if (callback_steps is None) or (
387
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
388
- ):
389
- raise ValueError(
390
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
391
- f" {type(callback_steps)}."
392
- )
393
-
394
- if prompt is not None and prompt_embeds is not None:
395
- raise ValueError(
396
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
397
- " only forward one of the two."
398
- )
399
- elif prompt is None and prompt_embeds is None:
400
- raise ValueError(
401
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
402
- )
403
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
404
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
405
-
406
- if negative_prompt is not None and negative_prompt_embeds is not None:
407
- raise ValueError(
408
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
409
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
410
- )
411
-
412
- if prompt_embeds is not None and negative_prompt_embeds is not None:
413
- if prompt_embeds.shape != negative_prompt_embeds.shape:
414
- raise ValueError(
415
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
416
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
417
- f" {negative_prompt_embeds.shape}."
418
- )
419
-
420
- # Check `image`
421
- if num_controlnet == 1:
422
- self.check_image(image, prompt, prompt_embeds)
423
- elif num_controlnet > 1:
424
- if not isinstance(image, list):
425
- raise TypeError("For multiple controlnets: `image` must be type `list`")
426
-
427
- # When `image` is a nested list:
428
- # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
429
- elif any(isinstance(i, list) for i in image):
430
- raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
431
- elif len(image) != num_controlnet:
432
- raise ValueError(
433
- f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets."
434
- )
435
-
436
- for image_ in image:
437
- self.check_image(image_, prompt, prompt_embeds)
438
- else:
439
- assert False
440
-
441
- # Check `controlnet_conditioning_scale`
442
- if num_controlnet == 1:
443
- if not isinstance(controlnet_conditioning_scale, float):
444
- raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
445
- elif num_controlnet > 1:
446
- if isinstance(controlnet_conditioning_scale, list):
447
- if any(isinstance(i, list) for i in controlnet_conditioning_scale):
448
- raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
449
- elif (
450
- isinstance(controlnet_conditioning_scale, list)
451
- and len(controlnet_conditioning_scale) != num_controlnet
452
- ):
453
- raise ValueError(
454
- "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
455
- " the same length as the number of controlnets"
456
- )
457
- else:
458
- assert False
459
-
460
- if len(control_guidance_start) != len(control_guidance_end):
461
- raise ValueError(
462
- f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
463
- )
464
-
465
- if num_controlnet > 1:
466
- if len(control_guidance_start) != num_controlnet:
467
- raise ValueError(
468
- f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}."
469
- )
470
-
471
- for start, end in zip(control_guidance_start, control_guidance_end):
472
- if start >= end:
473
- raise ValueError(
474
- f"control guidance start: {start} cannot be larger than or equal to control guidance end: {end}."
475
- )
476
- if start < 0.0:
477
- raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
478
- if end > 1.0:
479
- raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
480
-
481
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
482
- def check_image(self, image, prompt, prompt_embeds):
483
- image_is_pil = isinstance(image, PIL.Image.Image)
484
- image_is_tensor = isinstance(image, torch.Tensor)
485
- image_is_np = isinstance(image, np.ndarray)
486
- image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
487
- image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
488
- image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
489
-
490
- if (
491
- not image_is_pil
492
- and not image_is_tensor
493
- and not image_is_np
494
- and not image_is_pil_list
495
- and not image_is_tensor_list
496
- and not image_is_np_list
497
- ):
498
- raise TypeError(
499
- f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
500
- )
501
-
502
- if image_is_pil:
503
- image_batch_size = 1
504
- else:
505
- image_batch_size = len(image)
506
-
507
- if prompt is not None and isinstance(prompt, str):
508
- prompt_batch_size = 1
509
- elif prompt is not None and isinstance(prompt, list):
510
- prompt_batch_size = len(prompt)
511
- elif prompt_embeds is not None:
512
- prompt_batch_size = prompt_embeds.shape[0]
513
-
514
- if image_batch_size != 1 and image_batch_size != prompt_batch_size:
515
- raise ValueError(
516
- f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
517
- )
518
-
519
- # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
520
- def prepare_control_image(
521
- self,
522
- image,
523
- width,
524
- height,
525
- batch_size,
526
- num_images_per_prompt,
527
- device,
528
- dtype,
529
- do_classifier_free_guidance=False,
530
- guess_mode=False,
531
- ):
532
- image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
533
- image_batch_size = image.shape[0]
534
-
535
- if image_batch_size == 1:
536
- repeat_by = batch_size
537
- else:
538
- # image batch size is the same as prompt batch size
539
- repeat_by = num_images_per_prompt
540
-
541
- image = image.repeat_interleave(repeat_by, dim=0)
542
-
543
- image = image.to(device=device, dtype=dtype)
544
-
545
- if do_classifier_free_guidance and not guess_mode:
546
- image = torch.cat([image] * 2)
547
-
548
- return image
549
-
550
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
551
- def get_timesteps(self, num_inference_steps, strength, device):
552
- # get the original timestep using init_timestep
553
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
554
-
555
- t_start = max(num_inference_steps - init_timestep, 0)
556
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
557
-
558
- return timesteps, num_inference_steps - t_start
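
A short worked example of the arithmetic above (illustrative numbers only, assuming `scheduler.order == 1`, which holds for the multistep scheduler used later in this script):

num_inference_steps, strength = 20, 0.75
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 15
t_start = max(num_inference_steps - init_timestep, 0)                          # 5
# The pipeline then runs scheduler.timesteps[5:], i.e. only the last 15 of the 20
# configured steps, after noising `image` to the corresponding earlier timestep.
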
559
-
560
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
561
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
562
- raise ValueError(
563
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
564
- )
565
-
566
- image = image.to(device=device, dtype=dtype)
567
-
568
- batch_size = batch_size * num_images_per_prompt
569
-
570
- if image.shape[1] == 4:
571
- init_latents = image
572
-
573
- else:
574
- _image = image.cpu().detach().numpy()
575
- init_latents = self.vae_encoder(sample=_image)[0]
576
- init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype)
577
- init_latents = 0.18215 * init_latents
578
-
579
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
580
- # expand init_latents for batch_size
581
- deprecation_message = (
582
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
583
-                " images (`image`). Initial images are now being duplicated to match the number of text prompts. Note"
584
-                " that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update"
585
- " your script to pass as many initial images as text prompts to suppress this warning."
586
- )
587
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
588
- additional_image_per_prompt = batch_size // init_latents.shape[0]
589
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
590
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
591
- raise ValueError(
592
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
593
- )
594
- else:
595
- init_latents = torch.cat([init_latents], dim=0)
596
-
597
- shape = init_latents.shape
598
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
599
-
600
- # get latents
601
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
602
- latents = init_latents
603
-
604
- return latents
605
-
606
- @torch.no_grad()
607
- @replace_example_docstring(EXAMPLE_DOC_STRING)
608
- def __call__(
609
- self,
610
- num_controlnet: int,
611
- fp16: bool = True,
612
- prompt: Union[str, List[str]] = None,
613
- image: Union[
614
- torch.Tensor,
615
- PIL.Image.Image,
616
- np.ndarray,
617
- List[torch.Tensor],
618
- List[PIL.Image.Image],
619
- List[np.ndarray],
620
- ] = None,
621
- control_image: Union[
622
- torch.Tensor,
623
- PIL.Image.Image,
624
- np.ndarray,
625
- List[torch.Tensor],
626
- List[PIL.Image.Image],
627
- List[np.ndarray],
628
- ] = None,
629
- height: Optional[int] = None,
630
- width: Optional[int] = None,
631
- strength: float = 0.8,
632
- num_inference_steps: int = 50,
633
- guidance_scale: float = 7.5,
634
- negative_prompt: Optional[Union[str, List[str]]] = None,
635
- num_images_per_prompt: Optional[int] = 1,
636
- eta: float = 0.0,
637
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
638
- latents: Optional[torch.Tensor] = None,
639
- prompt_embeds: Optional[torch.Tensor] = None,
640
- negative_prompt_embeds: Optional[torch.Tensor] = None,
641
- output_type: Optional[str] = "pil",
642
- return_dict: bool = True,
643
- callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
644
- callback_steps: int = 1,
645
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
646
- controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
647
- guess_mode: bool = False,
648
- control_guidance_start: Union[float, List[float]] = 0.0,
649
- control_guidance_end: Union[float, List[float]] = 1.0,
650
- ):
651
- r"""
652
- Function invoked when calling the pipeline for generation.
653
-
654
- Args:
655
- prompt (`str` or `List[str]`, *optional*):
656
-                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
657
- instead.
658
- image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
659
- `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
660
- The initial image will be used as the starting point for the image generation process. Can also accept
661
-                image latents as `image`; if latents are passed directly, they will not be encoded again.
662
- control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
663
- `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
664
-                The ControlNet input condition. ControlNet uses this input condition to generate guidance for the UNet. If
665
- the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
666
-                also be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If
667
- height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
668
- specified in init, images must be passed as a list such that each element of the list can be correctly
669
- batched for input to a single controlnet.
670
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
671
- The height in pixels of the generated image.
672
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
673
- The width in pixels of the generated image.
674
- num_inference_steps (`int`, *optional*, defaults to 50):
675
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
676
- expense of slower inference.
677
- guidance_scale (`float`, *optional*, defaults to 7.5):
678
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
679
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
680
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
681
-                1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
682
- usually at the expense of lower image quality.
683
- negative_prompt (`str` or `List[str]`, *optional*):
684
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
685
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
686
- less than `1`).
687
- num_images_per_prompt (`int`, *optional*, defaults to 1):
688
- The number of images to generate per prompt.
689
- eta (`float`, *optional*, defaults to 0.0):
690
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
691
- [`schedulers.DDIMScheduler`], will be ignored for others.
692
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
693
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
694
- to make generation deterministic.
695
- latents (`torch.Tensor`, *optional*):
696
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
697
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
698
-                tensor will be generated by sampling using the supplied random `generator`.
699
- prompt_embeds (`torch.Tensor`, *optional*):
700
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
701
- provided, text embeddings will be generated from `prompt` input argument.
702
- negative_prompt_embeds (`torch.Tensor`, *optional*):
703
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
704
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
705
- argument.
706
- output_type (`str`, *optional*, defaults to `"pil"`):
707
-                The output format of the generated image. Choose between
708
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
709
- return_dict (`bool`, *optional*, defaults to `True`):
710
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
711
- plain tuple.
712
- callback (`Callable`, *optional*):
713
- A function that will be called every `callback_steps` steps during inference. The function will be
714
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
715
- callback_steps (`int`, *optional*, defaults to 1):
716
- The frequency at which the `callback` function will be called. If not specified, the callback will be
717
- called at every step.
718
- cross_attention_kwargs (`dict`, *optional*):
719
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
720
- `self.processor` in
721
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
722
- controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
723
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
724
- to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
725
- corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting
726
- than for [`~StableDiffusionControlNetPipeline.__call__`].
727
- guess_mode (`bool`, *optional*, defaults to `False`):
728
-                In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
729
-                you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
730
- control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
731
- The percentage of total steps at which the controlnet starts applying.
732
- control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
733
- The percentage of total steps at which the controlnet stops applying.
734
-
735
- Examples:
736
-
737
- Returns:
738
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
739
-            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, otherwise a `tuple`.
740
- When returning a tuple, the first element is a list with the generated images, and the second element is a
741
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
742
- (nsfw) content, according to the `safety_checker`.
743
- """
744
- if fp16:
745
- torch_dtype = torch.float16
746
- np_dtype = np.float16
747
- else:
748
- torch_dtype = torch.float32
749
- np_dtype = np.float32
750
-
751
- # align format for control guidance
752
- if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
753
- control_guidance_start = len(control_guidance_end) * [control_guidance_start]
754
- elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
755
- control_guidance_end = len(control_guidance_start) * [control_guidance_end]
756
- elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
757
- mult = num_controlnet
758
- control_guidance_start, control_guidance_end = (
759
- mult * [control_guidance_start],
760
- mult * [control_guidance_end],
761
- )
762
-
763
- # 1. Check inputs. Raise error if not correct
764
- self.check_inputs(
765
- num_controlnet,
766
- prompt,
767
- control_image,
768
- callback_steps,
769
- negative_prompt,
770
- prompt_embeds,
771
- negative_prompt_embeds,
772
- controlnet_conditioning_scale,
773
- control_guidance_start,
774
- control_guidance_end,
775
- )
776
-
777
- # 2. Define call parameters
778
- if prompt is not None and isinstance(prompt, str):
779
- batch_size = 1
780
- elif prompt is not None and isinstance(prompt, list):
781
- batch_size = len(prompt)
782
- else:
783
- batch_size = prompt_embeds.shape[0]
784
-
785
- device = self._execution_device
786
-        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
787
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
788
- # corresponds to doing no classifier free guidance.
789
- do_classifier_free_guidance = guidance_scale > 1.0
790
-
791
- if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float):
792
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet
793
-
794
- # 3. Encode input prompt
795
- prompt_embeds = self._encode_prompt(
796
- prompt,
797
- num_images_per_prompt,
798
- do_classifier_free_guidance,
799
- negative_prompt,
800
- prompt_embeds=prompt_embeds,
801
- negative_prompt_embeds=negative_prompt_embeds,
802
- )
803
- # 4. Prepare image
804
- image = self.image_processor.preprocess(image).to(dtype=torch.float32)
805
-
806
- # 5. Prepare controlnet_conditioning_image
807
- if num_controlnet == 1:
808
- control_image = self.prepare_control_image(
809
- image=control_image,
810
- width=width,
811
- height=height,
812
- batch_size=batch_size * num_images_per_prompt,
813
- num_images_per_prompt=num_images_per_prompt,
814
- device=device,
815
- dtype=torch_dtype,
816
- do_classifier_free_guidance=do_classifier_free_guidance,
817
- guess_mode=guess_mode,
818
- )
819
- elif num_controlnet > 1:
820
- control_images = []
821
-
822
- for control_image_ in control_image:
823
- control_image_ = self.prepare_control_image(
824
- image=control_image_,
825
- width=width,
826
- height=height,
827
- batch_size=batch_size * num_images_per_prompt,
828
- num_images_per_prompt=num_images_per_prompt,
829
- device=device,
830
- dtype=torch_dtype,
831
- do_classifier_free_guidance=do_classifier_free_guidance,
832
- guess_mode=guess_mode,
833
- )
834
-
835
- control_images.append(control_image_)
836
-
837
- control_image = control_images
838
- else:
839
- assert False
840
-
841
-        # 6. Prepare timesteps
842
- self.scheduler.set_timesteps(num_inference_steps, device=device)
843
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
844
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
845
-
846
-        # 7. Prepare latent variables
847
- latents = self.prepare_latents(
848
- image,
849
- latent_timestep,
850
- batch_size,
851
- num_images_per_prompt,
852
- torch_dtype,
853
- device,
854
- generator,
855
- )
856
-
857
-        # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
858
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
859
-
860
-        # 8.1 Create tensor stating which controlnets to keep
861
- controlnet_keep = []
862
- for i in range(len(timesteps)):
863
- keeps = [
864
- 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
865
- for s, e in zip(control_guidance_start, control_guidance_end)
866
- ]
867
- controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps)
868
-
869
-        # 9. Denoising loop
870
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
871
- with self.progress_bar(total=num_inference_steps) as progress_bar:
872
- for i, t in enumerate(timesteps):
873
- # expand the latents if we are doing classifier free guidance
874
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
875
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
876
-
877
- if isinstance(controlnet_keep[i], list):
878
- cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
879
- else:
880
- controlnet_cond_scale = controlnet_conditioning_scale
881
- if isinstance(controlnet_cond_scale, list):
882
- controlnet_cond_scale = controlnet_cond_scale[0]
883
- cond_scale = controlnet_cond_scale * controlnet_keep[i]
884
-
885
- # predict the noise residual
886
- _latent_model_input = latent_model_input.cpu().detach().numpy()
887
- _prompt_embeds = np.array(prompt_embeds, dtype=np_dtype)
888
- _t = np.array([t.cpu().detach().numpy()], dtype=np_dtype)
889
-
890
- if num_controlnet == 1:
891
- control_images = np.array([control_image], dtype=np_dtype)
892
- else:
893
- control_images = []
894
- for _control_img in control_image:
895
- _control_img = _control_img.cpu().detach().numpy()
896
- control_images.append(_control_img)
897
- control_images = np.array(control_images, dtype=np_dtype)
898
-
899
- control_scales = np.array(cond_scale, dtype=np_dtype)
900
- control_scales = np.resize(control_scales, (num_controlnet, 1))
901
-
902
- noise_pred = self.unet(
903
- sample=_latent_model_input,
904
- timestep=_t,
905
- encoder_hidden_states=_prompt_embeds,
906
- controlnet_conds=control_images,
907
- conditioning_scales=control_scales,
908
- )["noise_pred"]
909
- noise_pred = torch.from_numpy(noise_pred).to(device)
910
-
911
- # perform guidance
912
- if do_classifier_free_guidance:
913
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
914
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
915
-
916
- # compute the previous noisy sample x_t -> x_t-1
917
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
918
-
919
- # call the callback, if provided
920
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
921
- progress_bar.update()
922
- if callback is not None and i % callback_steps == 0:
923
- step_idx = i // getattr(self.scheduler, "order", 1)
924
- callback(step_idx, t, latents)
925
-
926
-        if output_type != "latent":
927
- _latents = latents.cpu().detach().numpy() / 0.18215
928
- _latents = np.array(_latents, dtype=np_dtype)
929
- image = self.vae_decoder(latent_sample=_latents)[0]
930
- image = torch.from_numpy(image).to(device, dtype=torch.float32)
931
- has_nsfw_concept = None
932
- else:
933
- image = latents
934
- has_nsfw_concept = None
935
-
936
- if has_nsfw_concept is None:
937
- do_denormalize = [True] * image.shape[0]
938
- else:
939
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
940
-
941
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
942
-
943
- if not return_dict:
944
- return (image, has_nsfw_concept)
945
-
946
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
947
-
948
-
949
- if __name__ == "__main__":
950
- parser = argparse.ArgumentParser()
951
-
952
- parser.add_argument(
953
- "--sd_model",
954
- type=str,
955
- required=True,
956
- help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
957
- )
958
-
959
- parser.add_argument(
960
- "--onnx_model_dir",
961
- type=str,
962
- required=True,
963
- help="Path to the ONNX directory",
964
- )
965
-
966
- parser.add_argument(
967
- "--unet_engine_path",
968
- type=str,
969
- required=True,
970
- help="Path to the unet + controlnet tensorrt model",
971
- )
972
-
973
- parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image")
974
-
975
- args = parser.parse_args()
976
-
977
- qr_image = Image.open(args.qr_img_path)
978
- qr_image = qr_image.resize((512, 512))
979
-
980
- # init stable diffusion pipeline
981
- pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model)
982
- pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
983
-
984
- provider = ["CUDAExecutionProvider", "CPUExecutionProvider"]
985
- onnx_pipeline = TensorRTStableDiffusionControlNetImg2ImgPipeline(
986
- vae_encoder=OnnxRuntimeModel.from_pretrained(
987
- os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider
988
- ),
989
- vae_decoder=OnnxRuntimeModel.from_pretrained(
990
- os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider
991
- ),
992
- text_encoder=OnnxRuntimeModel.from_pretrained(
993
- os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider
994
- ),
995
- tokenizer=pipeline.tokenizer,
996
- unet=TensorRTModel(args.unet_engine_path),
997
- scheduler=pipeline.scheduler,
998
- )
999
- onnx_pipeline = onnx_pipeline.to("cuda")
1000
-
1001
- prompt = "a cute cat fly to the moon"
1002
- negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect"
1003
-
1004
- for i in range(10):
1005
- start_time = time.time()
1006
- image = onnx_pipeline(
1007
- num_controlnet=2,
1008
- prompt=prompt,
1009
- negative_prompt=negative_prompt,
1010
- image=qr_image,
1011
- control_image=[qr_image, qr_image],
1012
- width=512,
1013
- height=512,
1014
- strength=0.75,
1015
- num_inference_steps=20,
1016
- num_images_per_prompt=1,
1017
- controlnet_conditioning_scale=[0.8, 0.8],
1018
- control_guidance_start=[0.3, 0.3],
1019
- control_guidance_end=[0.9, 0.9],
1020
- ).images[0]
1021
- print(time.time() - start_time)
1022
- image.save("output_qr_code.png")
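
A minimal, self-contained sketch (all values hypothetical) of how `control_guidance_start` / `control_guidance_end` become the per-step `cond_scale` via the `controlnet_keep` logic in the denoising loop above:

# Standalone sketch of the controlnet_keep schedule; the numbers are hypothetical.
num_steps = 20
control_guidance_start = [0.3, 0.3]
control_guidance_end = [0.9, 0.9]
controlnet_conditioning_scale = [0.8, 0.8]

for i in range(num_steps):
    keeps = [
        1.0 - float(i / num_steps < s or (i + 1) / num_steps > e)
        for s, e in zip(control_guidance_start, control_guidance_end)
    ]
    cond_scale = [c * k for c, k in zip(controlnet_conditioning_scale, keeps)]
    # cond_scale == [0.0, 0.0] for steps 0-5 and 18-19, and [0.8, 0.8] for steps 6-17,
    # i.e. both ControlNets are active only between 30% and 90% of the run.
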
scheduling_ufogen.py DELETED
@@ -1,521 +0,0 @@
1
- # Copyright 2024 UC Berkeley Team and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
16
-
17
- import math
18
- from dataclasses import dataclass
19
- from typing import List, Optional, Tuple, Union
20
-
21
- import numpy as np
22
- import torch
23
-
24
- from diffusers.configuration_utils import ConfigMixin, register_to_config
25
- from diffusers.schedulers.scheduling_utils import SchedulerMixin
26
- from diffusers.utils import BaseOutput
27
- from diffusers.utils.torch_utils import randn_tensor
28
-
29
-
30
- @dataclass
31
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UFOGen
32
- class UFOGenSchedulerOutput(BaseOutput):
33
- """
34
- Output class for the scheduler's `step` function output.
35
-
36
- Args:
37
- prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
38
- Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
39
- denoising loop.
40
- pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
41
- The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
42
- `pred_original_sample` can be used to preview progress or for guidance.
43
- """
44
-
45
- prev_sample: torch.Tensor
46
- pred_original_sample: Optional[torch.Tensor] = None
47
-
48
-
49
- # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
50
- def betas_for_alpha_bar(
51
- num_diffusion_timesteps,
52
- max_beta=0.999,
53
- alpha_transform_type="cosine",
54
- ):
55
- """
56
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
57
- (1-beta) over time from t = [0,1].
58
-
59
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
60
- to that part of the diffusion process.
61
-
62
-
63
- Args:
64
- num_diffusion_timesteps (`int`): the number of betas to produce.
65
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
66
- prevent singularities.
67
- alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
68
- Choose from `cosine` or `exp`
69
-
70
- Returns:
71
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
72
- """
73
- if alpha_transform_type == "cosine":
74
-
75
- def alpha_bar_fn(t):
76
- return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
77
-
78
- elif alpha_transform_type == "exp":
79
-
80
- def alpha_bar_fn(t):
81
- return math.exp(t * -12.0)
82
-
83
- else:
84
- raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
85
-
86
- betas = []
87
- for i in range(num_diffusion_timesteps):
88
- t1 = i / num_diffusion_timesteps
89
- t2 = (i + 1) / num_diffusion_timesteps
90
- betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
91
- return torch.tensor(betas, dtype=torch.float32)
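
The `squaredcos_cap_v2` option of the scheduler below is exactly this function with the cosine transform; a quick illustration of the schedule it produces:

betas = betas_for_alpha_bar(1000)
# betas[0] is tiny (~4e-5) while the final beta is capped at max_beta = 0.999,
# so noise is added very slowly at the start of the forward process and aggressively at the end.
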
92
-
93
-
94
- # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
95
- def rescale_zero_terminal_snr(betas):
96
- """
97
- Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
98
-
99
-
100
- Args:
101
- betas (`torch.Tensor`):
102
- the betas that the scheduler is being initialized with.
103
-
104
- Returns:
105
- `torch.Tensor`: rescaled betas with zero terminal SNR
106
- """
107
- # Convert betas to alphas_bar_sqrt
108
- alphas = 1.0 - betas
109
- alphas_cumprod = torch.cumprod(alphas, dim=0)
110
- alphas_bar_sqrt = alphas_cumprod.sqrt()
111
-
112
- # Store old values.
113
- alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
114
- alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
115
-
116
- # Shift so the last timestep is zero.
117
- alphas_bar_sqrt -= alphas_bar_sqrt_T
118
-
119
- # Scale so the first timestep is back to the old value.
120
- alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
121
-
122
- # Convert alphas_bar_sqrt to betas
123
- alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
124
- alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
125
- alphas = torch.cat([alphas_bar[0:1], alphas])
126
- betas = 1 - alphas
127
-
128
- return betas
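
As a quick sanity check of what this rescaling does (a sketch assuming the default linear schedule used by the scheduler below), the terminal cumulative alpha becomes numerically zero, so the last timestep carries no signal:

import torch  # torch is already imported at the top of this file

betas = torch.linspace(0.0001, 0.02, 1000, dtype=torch.float32)
rescaled = rescale_zero_terminal_snr(betas)
alphas_cumprod = torch.cumprod(1.0 - rescaled, dim=0)
print(float(alphas_cumprod[-1]))  # ~0.0 -> zero terminal SNR: x_T is pure noise
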
129
-
130
-
131
- class UFOGenScheduler(SchedulerMixin, ConfigMixin):
132
- """
133
- `UFOGenScheduler` implements multistep and onestep sampling for a UFOGen model, introduced in
134
- [UFOGen: You Forward Once Large Scale Text-to-Image Generation via Diffusion GANs](https://arxiv.org/abs/2311.09257)
135
-    by Yanwu Xu, Yang Zhao, Zhisheng Xiao, and Tingbo Hou. UFOGen is a variant of the denoising diffusion GAN (DDGAN)
136
- model designed for one-step sampling.
137
-
138
- This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
139
- methods the library implements for all schedulers such as loading and saving.
140
-
141
- Args:
142
- num_train_timesteps (`int`, defaults to 1000):
143
- The number of diffusion steps to train the model.
144
- beta_start (`float`, defaults to 0.0001):
145
- The starting `beta` value of inference.
146
- beta_end (`float`, defaults to 0.02):
147
- The final `beta` value.
148
- beta_schedule (`str`, defaults to `"linear"`):
149
- The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
150
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
151
- clip_sample (`bool`, defaults to `True`):
152
- Clip the predicted sample for numerical stability.
153
- clip_sample_range (`float`, defaults to 1.0):
154
- The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
155
- set_alpha_to_one (`bool`, defaults to `True`):
156
- Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
157
- there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
158
- otherwise it uses the alpha value at step 0.
159
- prediction_type (`str`, defaults to `epsilon`, *optional*):
160
- Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
161
- `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
162
- Video](https://imagen.research.google/video/paper.pdf) paper).
163
- thresholding (`bool`, defaults to `False`):
164
- Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
165
- as Stable Diffusion.
166
- dynamic_thresholding_ratio (`float`, defaults to 0.995):
167
- The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
168
- sample_max_value (`float`, defaults to 1.0):
169
- The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
170
- timestep_spacing (`str`, defaults to `"leading"`):
171
- The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
172
- Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
173
- steps_offset (`int`, defaults to 0):
174
- An offset added to the inference steps, as required by some model families.
175
- rescale_betas_zero_snr (`bool`, defaults to `False`):
176
- Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
177
- dark samples instead of limiting it to samples with medium brightness. Loosely related to
178
- [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
179
- denoising_step_size (`int`, defaults to 250):
180
- The denoising step size parameter from the UFOGen paper. The number of steps used for training is roughly
181
- `math.ceil(num_train_timesteps / denoising_step_size)`.
182
- """
183
-
184
- order = 1
185
-
186
- @register_to_config
187
- def __init__(
188
- self,
189
- num_train_timesteps: int = 1000,
190
- beta_start: float = 0.0001,
191
- beta_end: float = 0.02,
192
- beta_schedule: str = "linear",
193
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
194
- clip_sample: bool = True,
195
- set_alpha_to_one: bool = True,
196
- prediction_type: str = "epsilon",
197
- thresholding: bool = False,
198
- dynamic_thresholding_ratio: float = 0.995,
199
- clip_sample_range: float = 1.0,
200
- sample_max_value: float = 1.0,
201
- timestep_spacing: str = "leading",
202
- steps_offset: int = 0,
203
- rescale_betas_zero_snr: bool = False,
204
- denoising_step_size: int = 250,
205
- ):
206
- if trained_betas is not None:
207
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
208
- elif beta_schedule == "linear":
209
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
210
- elif beta_schedule == "scaled_linear":
211
- # this schedule is very specific to the latent diffusion model.
212
- self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
213
- elif beta_schedule == "squaredcos_cap_v2":
214
- # Glide cosine schedule
215
- self.betas = betas_for_alpha_bar(num_train_timesteps)
216
- elif beta_schedule == "sigmoid":
217
- # GeoDiff sigmoid schedule
218
- betas = torch.linspace(-6, 6, num_train_timesteps)
219
- self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
220
- else:
221
- raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
222
-
223
- # Rescale for zero SNR
224
- if rescale_betas_zero_snr:
225
- self.betas = rescale_zero_terminal_snr(self.betas)
226
-
227
- self.alphas = 1.0 - self.betas
228
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
229
-
230
- # For the final step, there is no previous alphas_cumprod because we are already at 0
231
- # `set_alpha_to_one` decides whether we set this parameter simply to one or
232
- # whether we use the final alpha of the "non-previous" one.
233
- self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
234
-
235
- # standard deviation of the initial noise distribution
236
- self.init_noise_sigma = 1.0
237
-
238
- # setable values
239
- self.custom_timesteps = False
240
- self.num_inference_steps = None
241
- self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
242
-
243
- def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
244
- """
245
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
246
- current timestep.
247
-
248
- Args:
249
- sample (`torch.Tensor`):
250
- The input sample.
251
- timestep (`int`, *optional*):
252
- The current timestep in the diffusion chain.
253
-
254
- Returns:
255
- `torch.Tensor`:
256
- A scaled input sample.
257
- """
258
- return sample
259
-
260
- def set_timesteps(
261
- self,
262
- num_inference_steps: Optional[int] = None,
263
- device: Union[str, torch.device] = None,
264
- timesteps: Optional[List[int]] = None,
265
- ):
266
- """
267
- Sets the discrete timesteps used for the diffusion chain (to be run before inference).
268
-
269
- Args:
270
- num_inference_steps (`int`):
271
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
272
- `timesteps` must be `None`.
273
- device (`str` or `torch.device`, *optional*):
274
- The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
275
- timesteps (`List[int]`, *optional*):
276
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
277
- timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed,
278
- `num_inference_steps` must be `None`.
279
-
280
- """
281
- if num_inference_steps is not None and timesteps is not None:
282
-            raise ValueError("Can only pass one of `num_inference_steps` or `timesteps`.")
283
-
284
- if timesteps is not None:
285
- for i in range(1, len(timesteps)):
286
- if timesteps[i] >= timesteps[i - 1]:
287
- raise ValueError("`custom_timesteps` must be in descending order.")
288
-
289
- if timesteps[0] >= self.config.num_train_timesteps:
290
- raise ValueError(
291
-                    f"`timesteps` must start before `self.config.num_train_timesteps`:"
292
- f" {self.config.num_train_timesteps}."
293
- )
294
-
295
- timesteps = np.array(timesteps, dtype=np.int64)
296
- self.custom_timesteps = True
297
- else:
298
- if num_inference_steps > self.config.num_train_timesteps:
299
- raise ValueError(
300
-                    f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:"
301
- f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
302
- f" maximal {self.config.num_train_timesteps} timesteps."
303
- )
304
-
305
- self.num_inference_steps = num_inference_steps
306
- self.custom_timesteps = False
307
-
308
- # TODO: For now, handle special case when num_inference_steps == 1 separately
309
- if num_inference_steps == 1:
310
- # Set the timestep schedule to num_train_timesteps - 1 rather than 0
311
- # (that is, the one-step timestep schedule is always trailing rather than leading or linspace)
312
- timesteps = np.array([self.config.num_train_timesteps - 1], dtype=np.int64)
313
- else:
314
- # TODO: For now, retain the DDPM timestep spacing logic
315
- # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
316
- if self.config.timestep_spacing == "linspace":
317
- timesteps = (
318
- np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps)
319
- .round()[::-1]
320
- .copy()
321
- .astype(np.int64)
322
- )
323
- elif self.config.timestep_spacing == "leading":
324
- step_ratio = self.config.num_train_timesteps // self.num_inference_steps
325
- # creates integer timesteps by multiplying by ratio
326
- # casting to int to avoid issues when num_inference_step is power of 3
327
- timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
328
- timesteps += self.config.steps_offset
329
- elif self.config.timestep_spacing == "trailing":
330
- step_ratio = self.config.num_train_timesteps / self.num_inference_steps
331
- # creates integer timesteps by multiplying by ratio
332
- # casting to int to avoid issues when num_inference_step is power of 3
333
- timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64)
334
- timesteps -= 1
335
- else:
336
- raise ValueError(
337
- f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
338
- )
339
-
340
- self.timesteps = torch.from_numpy(timesteps).to(device)
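
For concreteness, the spacing strategies above produce the following schedules for num_train_timesteps=1000 and num_inference_steps=4 (worked out from the code, illustrative only):

# "linspace": linspace(0, 999, 4), rounded, reversed      -> [999, 666, 333, 0]
# "leading":  step_ratio = 1000 // 4 = 250                -> [750, 500, 250, 0] (+ steps_offset)
# "trailing": round(arange(1000, 0, -250)) - 1            -> [999, 749, 499, 249]
# num_inference_steps == 1 always uses the trailing-style -> [999]
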
341
-
342
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
343
- def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:
344
- """
345
- "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
346
- prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
347
- s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
348
- pixels from saturation at each step. We find that dynamic thresholding results in significantly better
349
- photorealism as well as better image-text alignment, especially when using very large guidance weights."
350
-
351
- https://arxiv.org/abs/2205.11487
352
- """
353
- dtype = sample.dtype
354
- batch_size, channels, *remaining_dims = sample.shape
355
-
356
- if dtype not in (torch.float32, torch.float64):
357
- sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
358
-
359
- # Flatten sample for doing quantile calculation along each image
360
- sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))
361
-
362
- abs_sample = sample.abs() # "a certain percentile absolute pixel value"
363
-
364
- s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
365
- s = torch.clamp(
366
- s, min=1, max=self.config.sample_max_value
367
- ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
368
- s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
369
- sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
370
-
371
- sample = sample.reshape(batch_size, channels, *remaining_dims)
372
- sample = sample.to(dtype)
373
-
374
- return sample
375
-
376
- def step(
377
- self,
378
- model_output: torch.Tensor,
379
- timestep: int,
380
- sample: torch.Tensor,
381
- generator: Optional[torch.Generator] = None,
382
- return_dict: bool = True,
383
- ) -> Union[UFOGenSchedulerOutput, Tuple]:
384
- """
385
- Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
386
- process from the learned model outputs (most often the predicted noise).
387
-
388
- Args:
389
- model_output (`torch.Tensor`):
390
- The direct output from learned diffusion model.
391
-            timestep (`int`):
392
- The current discrete timestep in the diffusion chain.
393
- sample (`torch.Tensor`):
394
- A current instance of a sample created by the diffusion process.
395
- generator (`torch.Generator`, *optional*):
396
- A random number generator.
397
- return_dict (`bool`, *optional*, defaults to `True`):
398
- Whether or not to return a [`~schedulers.scheduling_ufogen.UFOGenSchedulerOutput`] or `tuple`.
399
-
400
- Returns:
401
-            [`~schedulers.scheduling_ufogen.UFOGenSchedulerOutput`] or `tuple`:
402
- If return_dict is `True`, [`~schedulers.scheduling_ufogen.UFOGenSchedulerOutput`] is returned, otherwise a
403
- tuple is returned where the first element is the sample tensor.
404
-
405
- """
406
- # 0. Resolve timesteps
407
- t = timestep
408
- prev_t = self.previous_timestep(t)
409
-
410
- # 1. compute alphas, betas
411
- alpha_prod_t = self.alphas_cumprod[t]
412
- alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.final_alpha_cumprod
413
- beta_prod_t = 1 - alpha_prod_t
414
- # beta_prod_t_prev = 1 - alpha_prod_t_prev
415
- # current_alpha_t = alpha_prod_t / alpha_prod_t_prev
416
- # current_beta_t = 1 - current_alpha_t
417
-
418
- # 2. compute predicted original sample from predicted noise also called
419
- # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
420
- if self.config.prediction_type == "epsilon":
421
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
422
- elif self.config.prediction_type == "sample":
423
- pred_original_sample = model_output
424
- elif self.config.prediction_type == "v_prediction":
425
- pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
426
- else:
427
- raise ValueError(
428
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
429
- " `v_prediction` for UFOGenScheduler."
430
- )
431
-
432
- # 3. Clip or threshold "predicted x_0"
433
- if self.config.thresholding:
434
- pred_original_sample = self._threshold_sample(pred_original_sample)
435
- elif self.config.clip_sample:
436
- pred_original_sample = pred_original_sample.clamp(
437
- -self.config.clip_sample_range, self.config.clip_sample_range
438
- )
439
-
440
- # 4. Single-step or multi-step sampling
441
- # Noise is not used on the final timestep of the timestep schedule.
442
- # This also means that noise is not used for one-step sampling.
443
- if t != self.timesteps[-1]:
444
- # TODO: is this correct?
445
- # Sample prev sample x_{t - 1} ~ q(x_{t - 1} | x_0 = G(x_t, t))
446
- device = model_output.device
447
- noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype)
448
- sqrt_alpha_prod_t_prev = alpha_prod_t_prev**0.5
449
- sqrt_one_minus_alpha_prod_t_prev = (1 - alpha_prod_t_prev) ** 0.5
450
- pred_prev_sample = sqrt_alpha_prod_t_prev * pred_original_sample + sqrt_one_minus_alpha_prod_t_prev * noise
451
- else:
452
- # Simply return the pred_original_sample. If `prediction_type == "sample"`, this is equivalent to returning
453
- # the output of the GAN generator U-Net on the initial noisy latents x_T ~ N(0, I).
454
- pred_prev_sample = pred_original_sample
455
-
456
- if not return_dict:
457
- return (pred_prev_sample,)
458
-
459
- return UFOGenSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
460
-
461
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
462
- def add_noise(
463
- self,
464
- original_samples: torch.Tensor,
465
- noise: torch.Tensor,
466
- timesteps: torch.IntTensor,
467
- ) -> torch.Tensor:
468
- # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
469
- alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
470
- timesteps = timesteps.to(original_samples.device)
471
-
472
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
473
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
474
- while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
475
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
476
-
477
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
478
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
479
- while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
480
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
481
-
482
- noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
483
- return noisy_samples
484
-
485
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
486
- def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor:
487
- # Make sure alphas_cumprod and timestep have same device and dtype as sample
488
- alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
489
- timesteps = timesteps.to(sample.device)
490
-
491
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
492
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
493
- while len(sqrt_alpha_prod.shape) < len(sample.shape):
494
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
495
-
496
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
497
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
498
- while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
499
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
500
-
501
- velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
502
- return velocity
503
-
504
- def __len__(self):
505
- return self.config.num_train_timesteps
506
-
507
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep
508
- def previous_timestep(self, timestep):
509
- if self.custom_timesteps:
510
- index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0]
511
- if index == self.timesteps.shape[0] - 1:
512
- prev_t = torch.tensor(-1)
513
- else:
514
- prev_t = self.timesteps[index + 1]
515
- else:
516
- num_inference_steps = (
517
- self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps
518
- )
519
- prev_t = timestep - self.config.num_train_timesteps // num_inference_steps
520
-
521
- return prev_t
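
Finally, a minimal usage sketch for `UFOGenScheduler` (the `denoiser` call is a hypothetical stand-in for an epsilon-predicting model and is not part of this file):

import torch

scheduler = UFOGenScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=1)  # one-step sampling: timesteps == [999]

sample = torch.randn(1, 4, 64, 64)  # x_T ~ N(0, I); the shape here is arbitrary
for t in scheduler.timesteps:
    model_output = denoiser(sample, t)  # hypothetical model call
    sample = scheduler.step(model_output, t, sample).prev_sample
# With one step, `sample` is the generator's direct prediction of x_0; with more steps,
# each intermediate prediction is re-noised to the next timestep via q(x_{t-1} | x_0).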