diffusers-bot committed
Commit 38d00eb
1 parent: 1ebfeb0

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50):
  1. v0.24.0/README.md +0 -0
  2. v0.24.0/bit_diffusion.py +264 -0
  3. v0.24.0/checkpoint_merger.py +280 -0
  4. v0.24.0/clip_guided_images_mixing_stable_diffusion.py +455 -0
  5. v0.24.0/clip_guided_stable_diffusion.py +347 -0
  6. v0.24.0/clip_guided_stable_diffusion_img2img.py +493 -0
  7. v0.24.0/composable_stable_diffusion.py +582 -0
  8. v0.24.0/ddim_noise_comparative_analysis.py +190 -0
  9. v0.24.0/dps_pipeline.py +466 -0
  10. v0.24.0/edict_pipeline.py +264 -0
  11. v0.24.0/iadb.py +149 -0
  12. v0.24.0/imagic_stable_diffusion.py +496 -0
  13. v0.24.0/img2img_inpainting.py +464 -0
  14. v0.24.0/interpolate_stable_diffusion.py +525 -0
  15. v0.24.0/latent_consistency_img2img.py +827 -0
  16. v0.24.0/latent_consistency_interpolate.py +1051 -0
  17. v0.24.0/latent_consistency_txt2img.py +728 -0
  18. v0.24.0/llm_grounded_diffusion.py +1015 -0
  19. v0.24.0/lpw_stable_diffusion.py +1471 -0
  20. v0.24.0/lpw_stable_diffusion_onnx.py +1148 -0
  21. v0.24.0/lpw_stable_diffusion_xl.py +1312 -0
  22. v0.24.0/magic_mix.py +152 -0
  23. v0.24.0/masked_stable_diffusion_img2img.py +262 -0
  24. v0.24.0/mixture_canvas.py +501 -0
  25. v0.24.0/mixture_tiling.py +405 -0
  26. v0.24.0/multilingual_stable_diffusion.py +437 -0
  27. v0.24.0/one_step_unet.py +24 -0
  28. v0.24.0/pipeline_fabric.py +751 -0
  29. v0.24.0/pipeline_prompt2prompt.py +861 -0
  30. v0.24.0/pipeline_stable_diffusion_upscale_ldm3d.py +772 -0
  31. v0.24.0/pipeline_stable_diffusion_xl_controlnet_adapter.py +1463 -0
  32. v0.24.0/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py +1908 -0
  33. v0.24.0/pipeline_zero1to3.py +893 -0
  34. v0.24.0/run_onnx_controlnet.py +911 -0
  35. v0.24.0/run_tensorrt_controlnet.py +1022 -0
  36. v0.24.0/sd_text2img_k_diffusion.py +476 -0
  37. v0.24.0/seed_resize_stable_diffusion.py +367 -0
  38. v0.24.0/speech_to_image_diffusion.py +262 -0
  39. v0.24.0/stable_diffusion_comparison.py +405 -0
  40. v0.24.0/stable_diffusion_controlnet_img2img.py +990 -0
  41. v0.24.0/stable_diffusion_controlnet_inpaint.py +1139 -0
  42. v0.24.0/stable_diffusion_controlnet_inpaint_img2img.py +1120 -0
  43. v0.24.0/stable_diffusion_controlnet_reference.py +838 -0
  44. v0.24.0/stable_diffusion_ipex.py +852 -0
  45. v0.24.0/stable_diffusion_mega.py +228 -0
  46. v0.24.0/stable_diffusion_reference.py +797 -0
  47. v0.24.0/stable_diffusion_repaint.py +958 -0
  48. v0.24.0/stable_diffusion_tensorrt_img2img.py +1055 -0
  49. v0.24.0/stable_diffusion_tensorrt_inpaint.py +1107 -0
  50. v0.24.0/stable_diffusion_tensorrt_txt2img.py +928 -0
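Every entry above is a standalone community pipeline shipped alongside the diffusers v0.24.0 release. As a minimal sketch (the base checkpoint id is a placeholder and not part of this commit), a pipeline from this folder is typically loaded by name through the custom_pipeline argument of DiffusionPipeline.from_pretrained:

# Sketch: loading one community pipeline from this folder by name (placeholder model id).
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",         # placeholder base checkpoint
    custom_pipeline="lpw_stable_diffusion",  # file 19 in the list above
    torch_dtype=torch.float16,
).to("cuda")
image = pipe(prompt="a photo of an astronaut riding a horse").images[0]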
v0.24.0/README.md ADDED
The diff for this file is too large to render. See raw diff
 
v0.24.0/bit_diffusion.py ADDED
@@ -0,0 +1,264 @@
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


# convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


# modified scheduler step functions for clamping the predicted x_0 between -bit_scale and +bit_scale
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """
    Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
    process from the learned model outputs (most often the predicted noise).
    Args:
        model_output (`torch.FloatTensor`): direct output from learned diffusion model.
        timestep (`int`): current discrete timestep in the diffusion chain.
        sample (`torch.FloatTensor`):
            current instance of sample being created by diffusion process.
        eta (`float`): weight of noise for added noise in diffusion step.
        use_clipped_model_output (`bool`): TODO
        generator: random number generator.
        return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class
    Returns:
        [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:
        [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
        returning a tuple, the first element is the sample tensor.
    """
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding

    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** (0.5)

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """
    Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
    process from the learned model outputs (most often the predicted noise).
    Args:
        model_output (`torch.FloatTensor`): direct output from learned diffusion model.
        timestep (`int`): current discrete timestep in the diffusion chain.
        sample (`torch.FloatTensor`):
            current instance of sample being created by diffusion process.
        prediction_type (`str`, default `epsilon`):
            indicates whether the model predicts the noise (epsilon), or the samples (`sample`).
        generator: random number generator.
        return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class
    Returns:
        [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`:
        [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
        returning a tuple, the first element is the sample tensor.
    """
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
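The two helpers at the top of this file convert an image tensor in [0, 1] into ±1 bit planes and back. A quick round-trip check, as a sketch that assumes decimal_to_bits and bits_to_decimal are in scope, looks like this:

# Round-trip sanity check for the bit encoding used above (sketch, not part of the commit).
import torch

img = torch.rand(1, 3, 8, 8)   # image tensor in [0, 1]
bits = decimal_to_bits(img)    # -> shape (1, 3 * 8, 8, 8), values in {-1, +1}
recon = bits_to_decimal(bits)  # back to [0, 1], quantized to 1/255 steps
assert bits.shape == (1, 24, 8, 8)
assert torch.allclose(recon, (img * 255).int().clamp(0, 255) / 255, atol=1e-6)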
v0.24.0/checkpoint_merger.py ADDED
@@ -0,0 +1,280 @@
import glob
import os
from typing import Dict, List, Union

import safetensors.torch
import torch
from huggingface_hub import snapshot_download

from diffusers import DiffusionPipeline, __version__
from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from diffusers.utils import CONFIG_NAME, DIFFUSERS_CACHE, ONNX_WEIGHTS_NAME, WEIGHTS_NAME


class CheckpointMergerPipeline(DiffusionPipeline):
    """
    A class that supports merging diffusion models based on the discussion here:
    https://github.com/huggingface/diffusers/issues/877

    Example usage:

    pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger.py")

    merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4","prompthero/openjourney"], interp = 'inv_sigmoid', alpha = 0.8, force = True)

    merged_pipe.to('cuda')

    prompt = "An astronaut riding a unicycle on Mars"

    results = merged_pipe(prompt)

    ## For more details, see the docstring for the merge method.

    """

    def __init__(self):
        self.register_to_config()
        super().__init__()

    def _compare_model_configs(self, dict0, dict1):
        if dict0 == dict1:
            return True
        else:
            config0, meta_keys0 = self._remove_meta_keys(dict0)
            config1, meta_keys1 = self._remove_meta_keys(dict1)
            if config0 == config1:
                print(f"Warning !: Mismatch in keys {meta_keys0} and {meta_keys1}.")
                return True
        return False

    def _remove_meta_keys(self, config_dict: Dict):
        meta_keys = []
        temp_dict = config_dict.copy()
        for key in config_dict.keys():
            if key.startswith("_"):
                temp_dict.pop(key)
                meta_keys.append(key)
        return (temp_dict, meta_keys)

    @torch.no_grad()
    def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]], **kwargs):
        """
        Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints(weights) of the models passed
        in the argument 'pretrained_model_name_or_path_list' as a list.

        Parameters:
        -----------
            pretrained_model_name_or_path_list : A list of valid pretrained model names in the HuggingFace hub or paths to locally stored models in the HuggingFace format.

            **kwargs:
                Supports all the default DiffusionPipeline.get_config_dict kwargs viz.

                cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map.

                alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
                    would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2

                interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_diff" and None.
                    Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_diff" is supported.

                force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False.

        """
        # Default kwargs from DiffusionPipeline
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        torch_dtype = kwargs.pop("torch_dtype", None)
        device_map = kwargs.pop("device_map", None)

        alpha = kwargs.pop("alpha", 0.5)
        interp = kwargs.pop("interp", None)

        print("Received list", pretrained_model_name_or_path_list)
        print(f"Combining with alpha={alpha}, interpolation mode={interp}")

        checkpoint_count = len(pretrained_model_name_or_path_list)
        # Ignore result from model_index_json comparison of the two checkpoints
        force = kwargs.pop("force", False)

        # If less than 2 checkpoints, nothing to merge. If more than 3, not supported for now.
        if checkpoint_count > 3 or checkpoint_count < 2:
            raise ValueError(
                "Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being"
                " passed."
            )

        print("Received the right number of checkpoints")
        # chkpt0, chkpt1 = pretrained_model_name_or_path_list[0:2]
        # chkpt2 = pretrained_model_name_or_path_list[2] if checkpoint_count == 3 else None

        # Validate that the checkpoints can be merged
        # Step 1: Load the model config and compare the checkpoints. We'll compare the model_index.json first while ignoring the keys starting with '_'
        config_dicts = []
        for pretrained_model_name_or_path in pretrained_model_name_or_path_list:
            config_dict = DiffusionPipeline.load_config(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
            )
            config_dicts.append(config_dict)

        comparison_result = True
        for idx in range(1, len(config_dicts)):
            comparison_result &= self._compare_model_configs(config_dicts[idx - 1], config_dicts[idx])
            if not force and comparison_result is False:
                raise ValueError("Incompatible checkpoints. Please check model_index.json for the models.")
        print(config_dicts[0], config_dicts[1])
        print("Compatible model_index.json files found")
        # Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files.
        cached_folders = []
        for pretrained_model_name_or_path, config_dict in zip(pretrained_model_name_or_path_list, config_dicts):
            folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
            allow_patterns = [os.path.join(k, "*") for k in folder_names]
            allow_patterns += [
                WEIGHTS_NAME,
                SCHEDULER_CONFIG_NAME,
                CONFIG_NAME,
                ONNX_WEIGHTS_NAME,
                DiffusionPipeline.config_name,
            ]
            requested_pipeline_class = config_dict.get("_class_name")
            user_agent = {"diffusers": __version__, "pipeline_class": requested_pipeline_class}

            cached_folder = (
                pretrained_model_name_or_path
                if os.path.isdir(pretrained_model_name_or_path)
                else snapshot_download(
                    pretrained_model_name_or_path,
                    cache_dir=cache_dir,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    allow_patterns=allow_patterns,
                    user_agent=user_agent,
                )
            )
            print("Cached Folder", cached_folder)
            cached_folders.append(cached_folder)

        # Step 3:
        # Load the first checkpoint as a diffusion pipeline and modify its module state_dict in place
        final_pipe = DiffusionPipeline.from_pretrained(
            cached_folders[0], torch_dtype=torch_dtype, device_map=device_map
        )
        final_pipe.to(self.device)

        checkpoint_path_2 = None
        if len(cached_folders) > 2:
            checkpoint_path_2 = os.path.join(cached_folders[2])

        if interp == "sigmoid":
            theta_func = CheckpointMergerPipeline.sigmoid
        elif interp == "inv_sigmoid":
            theta_func = CheckpointMergerPipeline.inv_sigmoid
        elif interp == "add_diff":
            theta_func = CheckpointMergerPipeline.add_difference
        else:
            theta_func = CheckpointMergerPipeline.weighted_sum

        # Find each module's state dict.
        for attr in final_pipe.config.keys():
            if not attr.startswith("_"):
                checkpoint_path_1 = os.path.join(cached_folders[1], attr)
                if os.path.exists(checkpoint_path_1):
                    files = [
                        *glob.glob(os.path.join(checkpoint_path_1, "*.safetensors")),
                        *glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
                    ]
                    checkpoint_path_1 = files[0] if len(files) > 0 else None
                if len(cached_folders) < 3:
                    checkpoint_path_2 = None
                else:
                    checkpoint_path_2 = os.path.join(cached_folders[2], attr)
                    if os.path.exists(checkpoint_path_2):
                        files = [
                            *glob.glob(os.path.join(checkpoint_path_2, "*.safetensors")),
                            *glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
                        ]
                        checkpoint_path_2 = files[0] if len(files) > 0 else None
                # For an attr if both checkpoint_path_1 and 2 are None, ignore.
                # If at least one is present, deal with it according to interp method, of course only if the state_dict keys match.
                if checkpoint_path_1 is None and checkpoint_path_2 is None:
                    print(f"Skipping {attr}: not present in 2nd or 3rd model")
                    continue
                try:
                    module = getattr(final_pipe, attr)
                    if isinstance(module, bool):  # ignore requires_safety_checker boolean
                        continue
                    theta_0 = getattr(module, "state_dict")
                    theta_0 = theta_0()

                    update_theta_0 = getattr(module, "load_state_dict")
                    theta_1 = (
                        safetensors.torch.load_file(checkpoint_path_1)
                        if (checkpoint_path_1.endswith(".safetensors"))
                        else torch.load(checkpoint_path_1, map_location="cpu")
                    )
                    theta_2 = None
                    if checkpoint_path_2:
                        theta_2 = (
                            safetensors.torch.load_file(checkpoint_path_2)
                            if (checkpoint_path_2.endswith(".safetensors"))
                            else torch.load(checkpoint_path_2, map_location="cpu")
                        )

                    if not theta_0.keys() == theta_1.keys():
                        print(f"Skipping {attr}: key mismatch")
                        continue
                    if theta_2 and not theta_1.keys() == theta_2.keys():
                        print(f"Skipping {attr}: key mismatch")
                except Exception as e:
                    print(f"Skipping {attr} due to an unexpected error: {str(e)}")
                    continue
                print(f"MERGING {attr}")

                for key in theta_0.keys():
                    if theta_2:
                        theta_0[key] = theta_func(theta_0[key], theta_1[key], theta_2[key], alpha)
                    else:
                        theta_0[key] = theta_func(theta_0[key], theta_1[key], None, alpha)

                del theta_1
                del theta_2
                update_theta_0(theta_0)

                del theta_0
        return final_pipe

    @staticmethod
    def weighted_sum(theta0, theta1, theta2, alpha):
        return ((1 - alpha) * theta0) + (alpha * theta1)

    # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
    @staticmethod
    def sigmoid(theta0, theta1, theta2, alpha):
        alpha = alpha * alpha * (3 - (2 * alpha))
        return theta0 + ((theta1 - theta0) * alpha)

    # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
    @staticmethod
    def inv_sigmoid(theta0, theta1, theta2, alpha):
        import math

        alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
        return theta0 + ((theta1 - theta0) * alpha)

    @staticmethod
    def add_difference(theta0, theta1, theta2, alpha):
        return theta0 + (theta1 - theta2) * (1.0 - alpha)
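The static interpolation methods at the end of the class operate tensor by tensor on the module state dicts. A toy sketch (not part of the commit, assuming the class above is in scope) of what two of them compute, which also illustrates the alpha semantics described in the merge docstring:

# Toy illustration of the interpolation functions above (sketch, not part of the commit).
import torch

theta0 = torch.zeros(2)          # weights from the first model
theta1 = torch.ones(2)           # weights from the second model
theta2 = torch.full((2,), 0.5)   # weights from an optional third model

alpha = 0.8
print(CheckpointMergerPipeline.weighted_sum(theta0, theta1, None, alpha))      # 0.2*theta0 + 0.8*theta1 -> [0.8, 0.8]
print(CheckpointMergerPipeline.add_difference(theta0, theta1, theta2, alpha))  # theta0 + (theta1 - theta2)*(1 - alpha) -> [0.1, 0.1]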
v0.24.0/clip_guided_images_mixing_stable_diffusion.py ADDED
@@ -0,0 +1,455 @@
# -*- coding: utf-8 -*-
import inspect
from typing import Optional, Union

import numpy as np
import PIL.Image
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.torch_utils import randn_tensor


def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
            generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
            return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                progress_bar.update()
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
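A usage sketch for this pipeline follows; the CLIP checkpoint and base model ids are illustrative placeholders, and the optional CoCa components are omitted, so both prompts are passed explicitly:

# Usage sketch for CLIPGuidedImagesMixingStableDiffusion (placeholder model ids, not part of the commit).
import torch
from PIL import Image
from transformers import CLIPFeatureExtractor, CLIPModel
from diffusers import DiffusionPipeline

clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"  # placeholder CLIP checkpoint
feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id)
clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",  # placeholder base checkpoint
    custom_pipeline="clip_guided_images_mixing_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

result = pipe(
    content_image=Image.open("content.png").convert("RGB"),
    style_image=Image.open("style.png").convert("RGB"),
    content_prompt="a photo of a city skyline",
    style_prompt="an oil painting",
    num_inference_steps=50,
    clip_guidance_scale=100,
).images[0]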
v0.24.0/clip_guided_stable_diffusion.py ADDED
@@ -0,0 +1,347 @@
import inspect
from typing import List, Optional, Union

import torch
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput


class MakeCutouts(nn.Module):
    def __init__(self, cut_size, cut_power=1.0):
        super().__init__()

        self.cut_size = cut_size
        self.cut_power = cut_power

    def forward(self, pixel_values, num_cutouts):
        sideY, sideX = pixel_values.shape[2:4]
        max_size = min(sideX, sideY)
        min_size = min(sideX, sideY, self.cut_size)
        cutouts = []
        for _ in range(num_cutouts):
            size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
            offsetx = torch.randint(0, sideX - size + 1, ())
            offsety = torch.randint(0, sideY - size + 1, ())
            cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
            cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
        return torch.cat(cutouts)


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedStableDiffusion(DiffusionPipeline):
    """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
    - https://github.com/Jack000/glid-3-xl
    - https://github.dev/crowsonkb/k-diffusion
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        self.cut_out_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.make_cutouts = MakeCutouts(self.cut_out_size)

        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        text_embeddings_clip,
        clip_guidance_scale,
        num_cutouts,
        use_cutouts=True,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        sample = 1 / self.vae.config.scaling_factor * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        if use_cutouts:
            image = self.make_cutouts(image, num_cutouts)
        else:
            image = transforms.Resize(self.cut_out_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        if use_cutouts:
            dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
            dists = dists.view([num_cutouts, sample.shape[0], -1])
            loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
        else:
            loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        clip_prompt: Optional[Union[str, List[str]]] = None,
        num_cutouts: Optional[int] = 4,
        use_cutouts: Optional[bool] = True,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        # get prompt text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)

        if clip_guidance_scale > 0:
            if clip_prompt is not None:
                clip_text_input = self.tokenizer(
                    clip_prompt,
                    padding="max_length",
                    max_length=self.tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                ).input_ids.to(self.device)
            else:
                clip_text_input = text_input.input_ids.to(self.device)
            text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
            text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
            # duplicate text embeddings clip for each generation per prompt
            text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform classifier free guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # perform clip guidance
            if clip_guidance_scale > 0:
                text_embeddings_for_guidance = (
                    text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                )
                noise_pred, latents = self.cond_fn(
                    latents,
                    t,
                    i,
                    text_embeddings_for_guidance,
                    noise_pred,
                    text_embeddings_clip,
                    clip_guidance_scale,
                    num_cutouts,
                    use_cutouts,
                )

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # scale and decode the image latents with vae
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
342
+ image = self.numpy_to_pil(image)
343
+
344
+ if not return_dict:
345
+ return (image, None)
346
+
347
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
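A minimal usage sketch for the CLIP-guided text-to-image pipeline above, following the loading pattern shown in the img2img example docstring below; the community pipeline id `clip_guided_stable_diffusion` and the LAION CLIP checkpoint are assumptions, not fixed requirements.
```
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPModel

# CLIP model and feature extractor used for the guidance gradients
clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)
feature_extractor = CLIPFeatureExtractor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_stable_diffusion",  # assumed community pipeline id
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

image = pipe(
    prompt="a watercolor painting of a lighthouse at dawn",
    num_inference_steps=50,
    guidance_scale=7.5,       # classifier-free guidance weight
    clip_guidance_scale=100,  # strength of the CLIP gradient guidance
    num_cutouts=4,
    use_cutouts=False,
).images[0]
image.save("clip_guided.png")
```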
v0.24.0/clip_guided_stable_diffusion_img2img.py ADDED
@@ -0,0 +1,493 @@
1
+ import inspect
2
+ from typing import List, Optional, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+ import torch
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+ from torchvision import transforms
10
+ from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
11
+
12
+ from diffusers import (
13
+ AutoencoderKL,
14
+ DDIMScheduler,
15
+ DiffusionPipeline,
16
+ DPMSolverMultistepScheduler,
17
+ LMSDiscreteScheduler,
18
+ PNDMScheduler,
19
+ UNet2DConditionModel,
20
+ )
21
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
22
+ from diffusers.utils import PIL_INTERPOLATION, deprecate
23
+ from diffusers.utils.torch_utils import randn_tensor
24
+
25
+
26
+ EXAMPLE_DOC_STRING = """
27
+ Examples:
28
+ ```
29
+ from io import BytesIO
30
+
31
+ import requests
32
+ import torch
33
+ from diffusers import DiffusionPipeline
34
+ from PIL import Image
35
+ from transformers import CLIPFeatureExtractor, CLIPModel
36
+
37
+ feature_extractor = CLIPFeatureExtractor.from_pretrained(
38
+ "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
39
+ )
40
+ clip_model = CLIPModel.from_pretrained(
41
+ "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
42
+ )
43
+
44
+
45
+ guided_pipeline = DiffusionPipeline.from_pretrained(
46
+ "CompVis/stable-diffusion-v1-4",
47
+ # custom_pipeline can also point to a local copy of clip_guided_stable_diffusion_img2img.py
48
+ custom_pipeline="clip_guided_stable_diffusion_img2img",
49
+ clip_model=clip_model,
50
+ feature_extractor=feature_extractor,
51
+ torch_dtype=torch.float16,
52
+ )
53
+ guided_pipeline.enable_attention_slicing()
54
+ guided_pipeline = guided_pipeline.to("cuda")
55
+
56
+ prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"
57
+
58
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
59
+
60
+ response = requests.get(url)
61
+ init_image = Image.open(BytesIO(response.content)).convert("RGB")
62
+
63
+ image = guided_pipeline(
64
+ prompt=prompt,
65
+ num_inference_steps=30,
66
+ image=init_image,
67
+ strength=0.75,
68
+ guidance_scale=7.5,
69
+ clip_guidance_scale=100,
70
+ num_cutouts=4,
71
+ use_cutouts=False,
72
+ ).images[0]
73
+ display(image)
74
+ ```
75
+ """
76
+
77
+
78
+ def preprocess(image, w, h):
79
+ if isinstance(image, torch.Tensor):
80
+ return image
81
+ elif isinstance(image, PIL.Image.Image):
82
+ image = [image]
83
+
84
+ if isinstance(image[0], PIL.Image.Image):
85
+ image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
86
+ image = np.concatenate(image, axis=0)
87
+ image = np.array(image).astype(np.float32) / 255.0
88
+ image = image.transpose(0, 3, 1, 2)
89
+ image = 2.0 * image - 1.0
90
+ image = torch.from_numpy(image)
91
+ elif isinstance(image[0], torch.Tensor):
92
+ image = torch.cat(image, dim=0)
93
+ return image
94
+
95
+
96
+ class MakeCutouts(nn.Module):
97
+ def __init__(self, cut_size, cut_power=1.0):
98
+ super().__init__()
99
+
100
+ self.cut_size = cut_size
101
+ self.cut_power = cut_power
102
+
103
+ def forward(self, pixel_values, num_cutouts):
104
+ sideY, sideX = pixel_values.shape[2:4]
105
+ max_size = min(sideX, sideY)
106
+ min_size = min(sideX, sideY, self.cut_size)
107
+ cutouts = []
108
+ for _ in range(num_cutouts):
109
+ size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
110
+ offsetx = torch.randint(0, sideX - size + 1, ())
111
+ offsety = torch.randint(0, sideY - size + 1, ())
112
+ cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
113
+ cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
114
+ return torch.cat(cutouts)
115
+
116
+
117
+ def spherical_dist_loss(x, y):
118
+ x = F.normalize(x, dim=-1)
119
+ y = F.normalize(y, dim=-1)
120
+ return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
121
+
122
+
123
+ def set_requires_grad(model, value):
124
+ for param in model.parameters():
125
+ param.requires_grad = value
126
+
127
+
128
+ class CLIPGuidedStableDiffusion(DiffusionPipeline):
129
+ """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
130
+ - https://github.com/Jack000/glid-3-xl
131
+ - https://github.dev/crowsonkb/k-diffusion
132
+ """
133
+
134
+ def __init__(
135
+ self,
136
+ vae: AutoencoderKL,
137
+ text_encoder: CLIPTextModel,
138
+ clip_model: CLIPModel,
139
+ tokenizer: CLIPTokenizer,
140
+ unet: UNet2DConditionModel,
141
+ scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
142
+ feature_extractor: CLIPFeatureExtractor,
143
+ ):
144
+ super().__init__()
145
+ self.register_modules(
146
+ vae=vae,
147
+ text_encoder=text_encoder,
148
+ clip_model=clip_model,
149
+ tokenizer=tokenizer,
150
+ unet=unet,
151
+ scheduler=scheduler,
152
+ feature_extractor=feature_extractor,
153
+ )
154
+
155
+ self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
156
+ self.cut_out_size = (
157
+ feature_extractor.size
158
+ if isinstance(feature_extractor.size, int)
159
+ else feature_extractor.size["shortest_edge"]
160
+ )
161
+ self.make_cutouts = MakeCutouts(self.cut_out_size)
162
+
163
+ set_requires_grad(self.text_encoder, False)
164
+ set_requires_grad(self.clip_model, False)
165
+
166
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
167
+ if slice_size == "auto":
168
+ # half the attention head size is usually a good trade-off between
169
+ # speed and memory
170
+ slice_size = self.unet.config.attention_head_dim // 2
171
+ self.unet.set_attention_slice(slice_size)
172
+
173
+ def disable_attention_slicing(self):
174
+ self.enable_attention_slicing(None)
175
+
176
+ def freeze_vae(self):
177
+ set_requires_grad(self.vae, False)
178
+
179
+ def unfreeze_vae(self):
180
+ set_requires_grad(self.vae, True)
181
+
182
+ def freeze_unet(self):
183
+ set_requires_grad(self.unet, False)
184
+
185
+ def unfreeze_unet(self):
186
+ set_requires_grad(self.unet, True)
187
+
188
+ def get_timesteps(self, num_inference_steps, strength, device):
189
+ # get the original timestep using init_timestep
190
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
191
+
192
+ t_start = max(num_inference_steps - init_timestep, 0)
193
+ timesteps = self.scheduler.timesteps[t_start:]
194
+
195
+ return timesteps, num_inference_steps - t_start
196
+
197
+ def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
198
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
199
+ raise ValueError(
200
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
201
+ )
202
+
203
+ image = image.to(device=device, dtype=dtype)
204
+
205
+ batch_size = batch_size * num_images_per_prompt
206
+ if isinstance(generator, list) and len(generator) != batch_size:
207
+ raise ValueError(
208
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
209
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
210
+ )
211
+
212
+ if isinstance(generator, list):
213
+ init_latents = [
214
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
215
+ ]
216
+ init_latents = torch.cat(init_latents, dim=0)
217
+ else:
218
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
219
+
220
+ init_latents = self.vae.config.scaling_factor * init_latents
221
+
222
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
223
+ # expand init_latents for batch_size
224
+ deprecation_message = (
225
+ f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
226
+ " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
227
+ " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
228
+ " your script to pass as many initial images as text prompts to suppress this warning."
229
+ )
230
+ deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
231
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
232
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
233
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
234
+ raise ValueError(
235
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
236
+ )
237
+ else:
238
+ init_latents = torch.cat([init_latents], dim=0)
239
+
240
+ shape = init_latents.shape
241
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
242
+
243
+ # get latents
244
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
245
+ latents = init_latents
246
+
247
+ return latents
248
+
249
+ @torch.enable_grad()
250
+ def cond_fn(
251
+ self,
252
+ latents,
253
+ timestep,
254
+ index,
255
+ text_embeddings,
256
+ noise_pred_original,
257
+ text_embeddings_clip,
258
+ clip_guidance_scale,
259
+ num_cutouts,
260
+ use_cutouts=True,
261
+ ):
262
+ latents = latents.detach().requires_grad_()
263
+
264
+ latent_model_input = self.scheduler.scale_model_input(latents, timestep)
265
+
266
+ # predict the noise residual
267
+ noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
268
+
269
+ if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
270
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
271
+ beta_prod_t = 1 - alpha_prod_t
272
+ # compute predicted original sample from predicted noise also called
273
+ # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
274
+ pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
275
+
276
+ fac = torch.sqrt(beta_prod_t)
277
+ sample = pred_original_sample * (fac) + latents * (1 - fac)
278
+ elif isinstance(self.scheduler, LMSDiscreteScheduler):
279
+ sigma = self.scheduler.sigmas[index]
280
+ sample = latents - sigma * noise_pred
281
+ else:
282
+ raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
283
+
284
+ sample = 1 / self.vae.config.scaling_factor * sample
285
+ image = self.vae.decode(sample).sample
286
+ image = (image / 2 + 0.5).clamp(0, 1)
287
+
288
+ if use_cutouts:
289
+ image = self.make_cutouts(image, num_cutouts)
290
+ else:
291
+ image = transforms.Resize(self.cut_out_size)(image)
292
+ image = self.normalize(image).to(latents.dtype)
293
+
294
+ image_embeddings_clip = self.clip_model.get_image_features(image)
295
+ image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
296
+
297
+ if use_cutouts:
298
+ dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
299
+ dists = dists.view([num_cutouts, sample.shape[0], -1])
300
+ loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
301
+ else:
302
+ loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
303
+
304
+ grads = -torch.autograd.grad(loss, latents)[0]
305
+
306
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
307
+ latents = latents.detach() + grads * (sigma**2)
308
+ noise_pred = noise_pred_original
309
+ else:
310
+ noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
311
+ return noise_pred, latents
312
+
313
+ @torch.no_grad()
314
+ def __call__(
315
+ self,
316
+ prompt: Union[str, List[str]],
317
+ height: Optional[int] = 512,
318
+ width: Optional[int] = 512,
319
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
320
+ strength: float = 0.8,
321
+ num_inference_steps: Optional[int] = 50,
322
+ guidance_scale: Optional[float] = 7.5,
323
+ num_images_per_prompt: Optional[int] = 1,
324
+ eta: float = 0.0,
325
+ clip_guidance_scale: Optional[float] = 100,
326
+ clip_prompt: Optional[Union[str, List[str]]] = None,
327
+ num_cutouts: Optional[int] = 4,
328
+ use_cutouts: Optional[bool] = True,
329
+ generator: Optional[torch.Generator] = None,
330
+ latents: Optional[torch.FloatTensor] = None,
331
+ output_type: Optional[str] = "pil",
332
+ return_dict: bool = True,
333
+ ):
334
+ if isinstance(prompt, str):
335
+ batch_size = 1
336
+ elif isinstance(prompt, list):
337
+ batch_size = len(prompt)
338
+ else:
339
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
340
+
341
+ if height % 8 != 0 or width % 8 != 0:
342
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
343
+
344
+ # get prompt text embeddings
345
+ text_input = self.tokenizer(
346
+ prompt,
347
+ padding="max_length",
348
+ max_length=self.tokenizer.model_max_length,
349
+ truncation=True,
350
+ return_tensors="pt",
351
+ )
352
+ text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
353
+ # duplicate text embeddings for each generation per prompt
354
+ text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
355
+
356
+ # set timesteps
357
+ accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
358
+ extra_set_kwargs = {}
359
+ if accepts_offset:
360
+ extra_set_kwargs["offset"] = 1
361
+
362
+ self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
363
+ # Some schedulers like PNDM have timesteps as arrays
364
+ # It's more optimized to move all timesteps to correct device beforehand
365
+ self.scheduler.timesteps.to(self.device)
366
+
367
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
368
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
369
+
370
+ # Preprocess image
371
+ image = preprocess(image, width, height)
372
+ latents = self.prepare_latents(
373
+ image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, self.device, generator
374
+ )
375
+
376
+ if clip_guidance_scale > 0:
377
+ if clip_prompt is not None:
378
+ clip_text_input = self.tokenizer(
379
+ clip_prompt,
380
+ padding="max_length",
381
+ max_length=self.tokenizer.model_max_length,
382
+ truncation=True,
383
+ return_tensors="pt",
384
+ ).input_ids.to(self.device)
385
+ else:
386
+ clip_text_input = text_input.input_ids.to(self.device)
387
+ text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
388
+ text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
389
+ # duplicate text embeddings clip for each generation per prompt
390
+ text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)
391
+
392
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
393
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
394
+ # corresponds to doing no classifier free guidance.
395
+ do_classifier_free_guidance = guidance_scale > 1.0
396
+ # get unconditional embeddings for classifier free guidance
397
+ if do_classifier_free_guidance:
398
+ max_length = text_input.input_ids.shape[-1]
399
+ uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
400
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
401
+ # duplicate unconditional embeddings for each generation per prompt
402
+ uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
403
+
404
+ # For classifier free guidance, we need to do two forward passes.
405
+ # Here we concatenate the unconditional and text embeddings into a single batch
406
+ # to avoid doing two forward passes
407
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
408
+
409
+ # get the initial random noise unless the user supplied it
410
+
411
+ # Unlike in other pipelines, latents need to be generated in the target device
412
+ # for 1-to-1 results reproducibility with the CompVis implementation.
413
+ # However this currently doesn't work in `mps`.
414
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
415
+ latents_dtype = text_embeddings.dtype
416
+ if latents is None:
417
+ if self.device.type == "mps":
418
+ # randn does not work reproducibly on mps
419
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
420
+ self.device
421
+ )
422
+ else:
423
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
424
+ else:
425
+ if latents.shape != latents_shape:
426
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
427
+ latents = latents.to(self.device)
428
+
429
+ # scale the initial noise by the standard deviation required by the scheduler
430
+ latents = latents * self.scheduler.init_noise_sigma
431
+
432
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
433
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
434
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
435
+ # and should be between [0, 1]
436
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
437
+ extra_step_kwargs = {}
438
+ if accepts_eta:
439
+ extra_step_kwargs["eta"] = eta
440
+
441
+ # check if the scheduler accepts generator
442
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
443
+ if accepts_generator:
444
+ extra_step_kwargs["generator"] = generator
445
+
446
+ with self.progress_bar(total=num_inference_steps):
447
+ for i, t in enumerate(timesteps):
448
+ # expand the latents if we are doing classifier free guidance
449
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
450
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
451
+
452
+ # predict the noise residual
453
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
454
+
455
+ # perform classifier free guidance
456
+ if do_classifier_free_guidance:
457
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
458
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
459
+
460
+ # perform clip guidance
461
+ if clip_guidance_scale > 0:
462
+ text_embeddings_for_guidance = (
463
+ text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
464
+ )
465
+ noise_pred, latents = self.cond_fn(
466
+ latents,
467
+ t,
468
+ i,
469
+ text_embeddings_for_guidance,
470
+ noise_pred,
471
+ text_embeddings_clip,
472
+ clip_guidance_scale,
473
+ num_cutouts,
474
+ use_cutouts,
475
+ )
476
+
477
+ # compute the previous noisy sample x_t -> x_t-1
478
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
479
+
480
+ # scale and decode the image latents with vae
481
+ latents = 1 / self.vae.config.scaling_factor * latents
482
+ image = self.vae.decode(latents).sample
483
+
484
+ image = (image / 2 + 0.5).clamp(0, 1)
485
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
486
+
487
+ if output_type == "pil":
488
+ image = self.numpy_to_pil(image)
489
+
490
+ if not return_dict:
491
+ return (image, None)
492
+
493
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
v0.24.0/composable_stable_diffusion.py ADDED
@@ -0,0 +1,582 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Callable, List, Optional, Union
17
+
18
+ import torch
19
+ from packaging import version
20
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
21
+
22
+ from diffusers import DiffusionPipeline
23
+ from diffusers.configuration_utils import FrozenDict
24
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
25
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
26
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
27
+ from diffusers.schedulers import (
28
+ DDIMScheduler,
29
+ DPMSolverMultistepScheduler,
30
+ EulerAncestralDiscreteScheduler,
31
+ EulerDiscreteScheduler,
32
+ LMSDiscreteScheduler,
33
+ PNDMScheduler,
34
+ )
35
+ from diffusers.utils import deprecate, is_accelerate_available, logging
36
+
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+
41
+ class ComposableStableDiffusionPipeline(DiffusionPipeline):
42
+ r"""
43
+ Pipeline for text-to-image generation using Stable Diffusion.
44
+
45
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
46
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
47
+
48
+ Args:
49
+ vae ([`AutoencoderKL`]):
50
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
51
+ text_encoder ([`CLIPTextModel`]):
52
+ Frozen text-encoder. Stable Diffusion uses the text portion of
53
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
54
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
55
+ tokenizer (`CLIPTokenizer`):
56
+ Tokenizer of class
57
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
58
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
59
+ scheduler ([`SchedulerMixin`]):
60
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
61
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
62
+ safety_checker ([`StableDiffusionSafetyChecker`]):
63
+ Classification module that estimates whether generated images could be considered offensive or harmful.
64
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
65
+ feature_extractor ([`CLIPImageProcessor`]):
66
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
67
+ """
68
+
69
+ _optional_components = ["safety_checker", "feature_extractor"]
70
+
71
+ def __init__(
72
+ self,
73
+ vae: AutoencoderKL,
74
+ text_encoder: CLIPTextModel,
75
+ tokenizer: CLIPTokenizer,
76
+ unet: UNet2DConditionModel,
77
+ scheduler: Union[
78
+ DDIMScheduler,
79
+ PNDMScheduler,
80
+ LMSDiscreteScheduler,
81
+ EulerDiscreteScheduler,
82
+ EulerAncestralDiscreteScheduler,
83
+ DPMSolverMultistepScheduler,
84
+ ],
85
+ safety_checker: StableDiffusionSafetyChecker,
86
+ feature_extractor: CLIPImageProcessor,
87
+ requires_safety_checker: bool = True,
88
+ ):
89
+ super().__init__()
90
+
91
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
92
+ deprecation_message = (
93
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
94
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
95
+ "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
96
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
97
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
98
+ " file"
99
+ )
100
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
101
+ new_config = dict(scheduler.config)
102
+ new_config["steps_offset"] = 1
103
+ scheduler._internal_dict = FrozenDict(new_config)
104
+
105
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
106
+ deprecation_message = (
107
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
108
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
109
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
110
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
111
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
112
+ )
113
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
114
+ new_config = dict(scheduler.config)
115
+ new_config["clip_sample"] = False
116
+ scheduler._internal_dict = FrozenDict(new_config)
117
+
118
+ if safety_checker is None and requires_safety_checker:
119
+ logger.warning(
120
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
121
+ " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
122
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
123
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
124
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
125
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
126
+ )
127
+
128
+ if safety_checker is not None and feature_extractor is None:
129
+ raise ValueError(
130
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
131
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
132
+ )
133
+
134
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
135
+ version.parse(unet.config._diffusers_version).base_version
136
+ ) < version.parse("0.9.0.dev0")
137
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
138
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
139
+ deprecation_message = (
140
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
141
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
142
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
143
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
144
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
145
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
146
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
147
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
148
+ " the `unet/config.json` file"
149
+ )
150
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
151
+ new_config = dict(unet.config)
152
+ new_config["sample_size"] = 64
153
+ unet._internal_dict = FrozenDict(new_config)
154
+
155
+ self.register_modules(
156
+ vae=vae,
157
+ text_encoder=text_encoder,
158
+ tokenizer=tokenizer,
159
+ unet=unet,
160
+ scheduler=scheduler,
161
+ safety_checker=safety_checker,
162
+ feature_extractor=feature_extractor,
163
+ )
164
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
165
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
166
+
167
+ def enable_vae_slicing(self):
168
+ r"""
169
+ Enable sliced VAE decoding.
170
+
171
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
172
+ steps. This is useful to save some memory and allow larger batch sizes.
173
+ """
174
+ self.vae.enable_slicing()
175
+
176
+ def disable_vae_slicing(self):
177
+ r"""
178
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
179
+ computing decoding in one step.
180
+ """
181
+ self.vae.disable_slicing()
182
+
183
+ def enable_sequential_cpu_offload(self, gpu_id=0):
184
+ r"""
185
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
186
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
187
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
188
+ """
189
+ if is_accelerate_available():
190
+ from accelerate import cpu_offload
191
+ else:
192
+ raise ImportError("Please install accelerate via `pip install accelerate`")
193
+
194
+ device = torch.device(f"cuda:{gpu_id}")
195
+
196
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
197
+ if cpu_offloaded_model is not None:
198
+ cpu_offload(cpu_offloaded_model, device)
199
+
200
+ if self.safety_checker is not None:
201
+ # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
202
+ # fix by only offloading self.safety_checker for now
203
+ cpu_offload(self.safety_checker.vision_model, device)
204
+
205
+ @property
206
+ def _execution_device(self):
207
+ r"""
208
+ Returns the device on which the pipeline's models will be executed. After calling
209
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
210
+ hooks.
211
+ """
212
+ if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
213
+ return self.device
214
+ for module in self.unet.modules():
215
+ if (
216
+ hasattr(module, "_hf_hook")
217
+ and hasattr(module._hf_hook, "execution_device")
218
+ and module._hf_hook.execution_device is not None
219
+ ):
220
+ return torch.device(module._hf_hook.execution_device)
221
+ return self.device
222
+
223
+ def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
224
+ r"""
225
+ Encodes the prompt into text encoder hidden states.
226
+
227
+ Args:
228
+ prompt (`str` or `list(int)`):
229
+ prompt to be encoded
230
+ device: (`torch.device`):
231
+ torch device
232
+ num_images_per_prompt (`int`):
233
+ number of images that should be generated per prompt
234
+ do_classifier_free_guidance (`bool`):
235
+ whether to use classifier free guidance or not
236
+ negative_prompt (`str` or `List[str]`):
237
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
238
+ if `guidance_scale` is less than `1`).
239
+ """
240
+ batch_size = len(prompt) if isinstance(prompt, list) else 1
241
+
242
+ text_inputs = self.tokenizer(
243
+ prompt,
244
+ padding="max_length",
245
+ max_length=self.tokenizer.model_max_length,
246
+ truncation=True,
247
+ return_tensors="pt",
248
+ )
249
+ text_input_ids = text_inputs.input_ids
250
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
251
+
252
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
253
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
254
+ logger.warning(
255
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
256
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
257
+ )
258
+
259
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
260
+ attention_mask = text_inputs.attention_mask.to(device)
261
+ else:
262
+ attention_mask = None
263
+
264
+ text_embeddings = self.text_encoder(
265
+ text_input_ids.to(device),
266
+ attention_mask=attention_mask,
267
+ )
268
+ text_embeddings = text_embeddings[0]
269
+
270
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
271
+ bs_embed, seq_len, _ = text_embeddings.shape
272
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
273
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
274
+
275
+ # get unconditional embeddings for classifier free guidance
276
+ if do_classifier_free_guidance:
277
+ uncond_tokens: List[str]
278
+ if negative_prompt is None:
279
+ uncond_tokens = [""] * batch_size
280
+ elif type(prompt) is not type(negative_prompt):
281
+ raise TypeError(
282
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
283
+ f" {type(prompt)}."
284
+ )
285
+ elif isinstance(negative_prompt, str):
286
+ uncond_tokens = [negative_prompt]
287
+ elif batch_size != len(negative_prompt):
288
+ raise ValueError(
289
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
290
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
291
+ " the batch size of `prompt`."
292
+ )
293
+ else:
294
+ uncond_tokens = negative_prompt
295
+
296
+ max_length = text_input_ids.shape[-1]
297
+ uncond_input = self.tokenizer(
298
+ uncond_tokens,
299
+ padding="max_length",
300
+ max_length=max_length,
301
+ truncation=True,
302
+ return_tensors="pt",
303
+ )
304
+
305
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
306
+ attention_mask = uncond_input.attention_mask.to(device)
307
+ else:
308
+ attention_mask = None
309
+
310
+ uncond_embeddings = self.text_encoder(
311
+ uncond_input.input_ids.to(device),
312
+ attention_mask=attention_mask,
313
+ )
314
+ uncond_embeddings = uncond_embeddings[0]
315
+
316
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
317
+ seq_len = uncond_embeddings.shape[1]
318
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
319
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
320
+
321
+ # For classifier free guidance, we need to do two forward passes.
322
+ # Here we concatenate the unconditional and text embeddings into a single batch
323
+ # to avoid doing two forward passes
324
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
325
+
326
+ return text_embeddings
327
+
328
+ def run_safety_checker(self, image, device, dtype):
329
+ if self.safety_checker is not None:
330
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
331
+ image, has_nsfw_concept = self.safety_checker(
332
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
333
+ )
334
+ else:
335
+ has_nsfw_concept = None
336
+ return image, has_nsfw_concept
337
+
338
+ def decode_latents(self, latents):
339
+ latents = 1 / 0.18215 * latents
340
+ image = self.vae.decode(latents).sample
341
+ image = (image / 2 + 0.5).clamp(0, 1)
342
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
343
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
344
+ return image
345
+
346
+ def prepare_extra_step_kwargs(self, generator, eta):
347
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
348
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
349
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
350
+ # and should be between [0, 1]
351
+
352
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
353
+ extra_step_kwargs = {}
354
+ if accepts_eta:
355
+ extra_step_kwargs["eta"] = eta
356
+
357
+ # check if the scheduler accepts generator
358
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
359
+ if accepts_generator:
360
+ extra_step_kwargs["generator"] = generator
361
+ return extra_step_kwargs
362
+
363
+ def check_inputs(self, prompt, height, width, callback_steps):
364
+ if not isinstance(prompt, str) and not isinstance(prompt, list):
365
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
366
+
367
+ if height % 8 != 0 or width % 8 != 0:
368
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
369
+
370
+ if (callback_steps is None) or (
371
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
372
+ ):
373
+ raise ValueError(
374
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
375
+ f" {type(callback_steps)}."
376
+ )
377
+
378
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
379
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
380
+ if latents is None:
381
+ if device.type == "mps":
382
+ # randn does not work reproducibly on mps
383
+ latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
384
+ else:
385
+ latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
386
+ else:
387
+ if latents.shape != shape:
388
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
389
+ latents = latents.to(device)
390
+
391
+ # scale the initial noise by the standard deviation required by the scheduler
392
+ latents = latents * self.scheduler.init_noise_sigma
393
+ return latents
394
+
395
+ @torch.no_grad()
396
+ def __call__(
397
+ self,
398
+ prompt: Union[str, List[str]],
399
+ height: Optional[int] = None,
400
+ width: Optional[int] = None,
401
+ num_inference_steps: int = 50,
402
+ guidance_scale: float = 7.5,
403
+ negative_prompt: Optional[Union[str, List[str]]] = None,
404
+ num_images_per_prompt: Optional[int] = 1,
405
+ eta: float = 0.0,
406
+ generator: Optional[torch.Generator] = None,
407
+ latents: Optional[torch.FloatTensor] = None,
408
+ output_type: Optional[str] = "pil",
409
+ return_dict: bool = True,
410
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
411
+ callback_steps: int = 1,
412
+ weights: Optional[str] = "",
413
+ ):
414
+ r"""
415
+ Function invoked when calling the pipeline for generation.
416
+
417
+ Args:
418
+ prompt (`str` or `List[str]`):
419
+ The prompt or prompts to guide the image generation.
420
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
421
+ The height in pixels of the generated image.
422
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
423
+ The width in pixels of the generated image.
424
+ num_inference_steps (`int`, *optional*, defaults to 50):
425
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
426
+ expense of slower inference.
427
+ guidance_scale (`float`, *optional*, defaults to 5.0):
428
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
429
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
430
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
431
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
432
+ usually at the expense of lower image quality.
433
+ negative_prompt (`str` or `List[str]`, *optional*):
434
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
435
+ if `guidance_scale` is less than `1`).
436
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
437
+ The number of images to generate per prompt.
438
+ eta (`float`, *optional*, defaults to 0.0):
439
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
440
+ [`schedulers.DDIMScheduler`], will be ignored for others.
441
+ generator (`torch.Generator`, *optional*):
442
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
443
+ deterministic.
444
+ latents (`torch.FloatTensor`, *optional*):
445
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
446
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
447
+ tensor will be generated by sampling using the supplied random `generator`.
448
+ output_type (`str`, *optional*, defaults to `"pil"`):
449
+ The output format of the generate image. Choose between
450
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
451
+ return_dict (`bool`, *optional*, defaults to `True`):
452
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
453
+ plain tuple.
454
+ callback (`Callable`, *optional*):
455
+ A function that will be called every `callback_steps` steps during inference. The function will be
456
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
457
+ callback_steps (`int`, *optional*, defaults to 1):
458
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
459
+ called at every step.
460
+
461
+ Returns:
462
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
463
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
464
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
465
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
466
+ (nsfw) content, according to the `safety_checker`.
467
+ """
468
+ # 0. Default height and width to unet
469
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
470
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
471
+
472
+ # 1. Check inputs. Raise error if not correct
473
+ self.check_inputs(prompt, height, width, callback_steps)
474
+
475
+ # 2. Define call parameters
476
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
477
+ device = self._execution_device
478
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
479
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
480
+ # corresponds to doing no classifier free guidance.
481
+ do_classifier_free_guidance = guidance_scale > 1.0
482
+
483
+ if "|" in prompt:
484
+ prompt = [x.strip() for x in prompt.split("|")]
485
+ print(f"composing {prompt}...")
486
+
487
+ if not weights:
488
+ # specify weights for prompts (excluding the unconditional score)
489
+ print("using equal positive weights (conjunction) for all prompts...")
490
+ weights = torch.tensor([guidance_scale] * len(prompt), device=self.device).reshape(-1, 1, 1, 1)
491
+ else:
492
+ # set prompt weight for each
493
+ num_prompts = len(prompt) if isinstance(prompt, list) else 1
494
+ weights = [float(w.strip()) for w in weights.split("|")]
495
+ # guidance scale as the default
496
+ if len(weights) < num_prompts:
497
+ weights.append(guidance_scale)
498
+ else:
499
+ weights = weights[:num_prompts]
500
+ assert len(weights) == len(prompt), "weights specified are not equal to the number of prompts"
501
+ weights = torch.tensor(weights, device=self.device).reshape(-1, 1, 1, 1)
502
+ else:
503
+ weights = guidance_scale
504
+
505
+ # 3. Encode input prompt
506
+ text_embeddings = self._encode_prompt(
507
+ prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
508
+ )
509
+
510
+ # 4. Prepare timesteps
511
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
512
+ timesteps = self.scheduler.timesteps
513
+
514
+ # 5. Prepare latent variables
515
+ num_channels_latents = self.unet.config.in_channels
516
+ latents = self.prepare_latents(
517
+ batch_size * num_images_per_prompt,
518
+ num_channels_latents,
519
+ height,
520
+ width,
521
+ text_embeddings.dtype,
522
+ device,
523
+ generator,
524
+ latents,
525
+ )
526
+
527
+ # composable diffusion
528
+ if isinstance(prompt, list) and batch_size == 1:
529
+ # remove extra unconditional embedding
530
+ # N = one unconditional embed + conditional embeds
531
+ text_embeddings = text_embeddings[len(prompt) - 1 :]
532
+
533
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
534
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
535
+
536
+ # 7. Denoising loop
537
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
538
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
539
+ for i, t in enumerate(timesteps):
540
+ # expand the latents if we are doing classifier free guidance
541
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
542
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
543
+
544
+ # predict the noise residual
545
+ noise_pred = []
546
+ for j in range(text_embeddings.shape[0]):
547
+ noise_pred.append(
548
+ self.unet(latent_model_input[:1], t, encoder_hidden_states=text_embeddings[j : j + 1]).sample
549
+ )
550
+ noise_pred = torch.cat(noise_pred, dim=0)
551
+
552
+ # perform guidance
553
+ if do_classifier_free_guidance:
554
+ noise_pred_uncond, noise_pred_text = noise_pred[:1], noise_pred[1:]
555
+ noise_pred = noise_pred_uncond + (weights * (noise_pred_text - noise_pred_uncond)).sum(
556
+ dim=0, keepdims=True
557
+ )
558
+
559
+ # compute the previous noisy sample x_t -> x_t-1
560
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
561
+
562
+ # call the callback, if provided
563
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
564
+ progress_bar.update()
565
+ if callback is not None and i % callback_steps == 0:
566
+ step_idx = i // getattr(self.scheduler, "order", 1)
567
+ callback(step_idx, t, latents)
568
+
569
+ # 8. Post-processing
570
+ image = self.decode_latents(latents)
571
+
572
+ # 9. Run safety checker
573
+ image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
574
+
575
+ # 10. Convert to PIL
576
+ if output_type == "pil":
577
+ image = self.numpy_to_pil(image)
578
+
579
+ if not return_dict:
580
+ return (image, has_nsfw_concept)
581
+
582
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
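A minimal usage sketch for the composable pipeline above: concepts are separated by `|` in the prompt and optional per-concept weights are passed the same way (see the `weights` parsing in `__call__`). The pipeline id `composable_stable_diffusion` is assumed to match this file name.
```
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="composable_stable_diffusion",  # assumed to resolve to this file
)
pipe = pipe.to("cuda")

# each "|"-separated concept is denoised separately and their guidance terms are summed
image = pipe(
    prompt="a red sports car | a snowy mountain road",
    num_inference_steps=50,
    guidance_scale=7.5,
    weights="7.5 | 7.5",  # one weight per concept; missing weights fall back to guidance_scale
).images[0]
image.save("composable.png")
```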
v0.24.0/ddim_noise_comparative_analysis.py ADDED
@@ -0,0 +1,190 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import List, Optional, Tuple, Union
16
+
17
+ import PIL.Image
18
+ import torch
19
+ from torchvision import transforms
20
+
21
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
22
+ from diffusers.schedulers import DDIMScheduler
23
+ from diffusers.utils.torch_utils import randn_tensor
24
+
25
+
26
+ trans = transforms.Compose(
27
+ [
28
+ transforms.Resize((256, 256)),
29
+ transforms.ToTensor(),
30
+ transforms.Normalize([0.5], [0.5]),
31
+ ]
32
+ )
33
+
34
+
35
+ def preprocess(image):
36
+ if isinstance(image, torch.Tensor):
37
+ return image
38
+ elif isinstance(image, PIL.Image.Image):
39
+ image = [image]
40
+
41
+ image = [trans(img.convert("RGB")) for img in image]
42
+ image = torch.stack(image)
43
+ return image
44
+
45
+
46
+ class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
47
+ r"""
48
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
49
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
50
+
51
+ Parameters:
52
+ unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
53
+ scheduler ([`SchedulerMixin`]):
54
+ A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
55
+ [`DDPMScheduler`], or [`DDIMScheduler`].
56
+ """
57
+
58
+ def __init__(self, unet, scheduler):
59
+ super().__init__()
60
+
61
+ # make sure scheduler can always be converted to DDIM
62
+ scheduler = DDIMScheduler.from_config(scheduler.config)
63
+
64
+ self.register_modules(unet=unet, scheduler=scheduler)
65
+
66
+ def check_inputs(self, strength):
67
+ if strength < 0 or strength > 1:
68
+ raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
69
+
70
+ def get_timesteps(self, num_inference_steps, strength, device):
71
+ # get the original timestep using init_timestep
72
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
73
+
74
+ t_start = max(num_inference_steps - init_timestep, 0)
75
+ timesteps = self.scheduler.timesteps[t_start:]
76
+
77
+ return timesteps, num_inference_steps - t_start
78
+
79
+ def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
80
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
81
+ raise ValueError(
82
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
83
+ )
84
+
85
+ init_latents = image.to(device=device, dtype=dtype)
86
+
87
+ if isinstance(generator, list) and len(generator) != batch_size:
88
+ raise ValueError(
89
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
90
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
91
+ )
92
+
93
+ shape = init_latents.shape
94
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
95
+
96
+ # get latents
97
+ print("add noise to latents at timestep", timestep)
98
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
99
+ latents = init_latents
100
+
101
+ return latents
102
+
103
+ @torch.no_grad()
104
+ def __call__(
105
+ self,
106
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
107
+ strength: float = 0.8,
108
+ batch_size: int = 1,
109
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
110
+ eta: float = 0.0,
111
+ num_inference_steps: int = 50,
112
+ use_clipped_model_output: Optional[bool] = None,
113
+ output_type: Optional[str] = "pil",
114
+ return_dict: bool = True,
115
+ ) -> Union[ImagePipelineOutput, Tuple]:
116
+ r"""
117
+ Args:
118
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
119
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
120
+ process.
121
+ strength (`float`, *optional*, defaults to 0.8):
122
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
123
+ will be used as a starting point, adding more noise to it the larger the `strength`. The number of
124
+ denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
125
+ be maximum and the denoising process will run for the full number of iterations specified in
126
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
127
+ batch_size (`int`, *optional*, defaults to 1):
128
+ The number of images to generate.
129
+ generator (`torch.Generator`, *optional*):
130
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
131
+ to make generation deterministic.
132
+ eta (`float`, *optional*, defaults to 0.0):
133
+ The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
134
+ num_inference_steps (`int`, *optional*, defaults to 50):
135
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
136
+ expense of slower inference.
137
+ use_clipped_model_output (`bool`, *optional*, defaults to `None`):
138
+ if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed
139
+ downstream to the scheduler. So use `None` for schedulers which don't support this argument.
140
+ output_type (`str`, *optional*, defaults to `"pil"`):
141
+ The output format of the generate image. Choose between
142
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
143
+ return_dict (`bool`, *optional*, defaults to `True`):
144
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
145
+
146
+ Returns:
147
+ [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
148
+ True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
149
+ """
150
+ # 1. Check inputs. Raise error if not correct
151
+ self.check_inputs(strength)
152
+
153
+ # 2. Preprocess image
154
+ image = preprocess(image)
155
+
156
+ # 3. set timesteps
157
+ self.scheduler.set_timesteps(num_inference_steps, device=self.device)
158
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
159
+ latent_timestep = timesteps[:1].repeat(batch_size)
160
+
161
+ # 4. Prepare latent variables
162
+ latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
163
+ image = latents
164
+
165
+ # 5. Denoising loop
166
+ for t in self.progress_bar(timesteps):
167
+ # 1. predict noise model_output
168
+ model_output = self.unet(image, t).sample
169
+
170
+ # 2. predict previous mean of image x_t-1 and add variance depending on eta
171
+ # eta corresponds to η in paper and should be between [0, 1]
172
+ # do x_t -> x_t-1
173
+ image = self.scheduler.step(
174
+ model_output,
175
+ t,
176
+ image,
177
+ eta=eta,
178
+ use_clipped_model_output=use_clipped_model_output,
179
+ generator=generator,
180
+ ).prev_sample
181
+
182
+ image = (image / 2 + 0.5).clamp(0, 1)
183
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
184
+ if output_type == "pil":
185
+ image = self.numpy_to_pil(image)
186
+
187
+ if not return_dict:
188
+ return (image, latent_timestep.item())
189
+
190
+ return ImagePipelineOutput(images=image)
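A minimal usage sketch for `DDIMNoiseComparativeAnalysisPipeline`: load an unconditional DDPM/DDIM checkpoint with this file as the `custom_pipeline`, then sweep `strength` to compare how much of the input image survives noising at different timesteps. The checkpoint id and image path are assumptions for illustration.

```python
import PIL.Image
import torch
from diffusers import DiffusionPipeline

# Placeholder checkpoint id and input image path, for illustration only.
pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-ema-celebahq-256",
    custom_pipeline="ddim_noise_comparative_analysis",
).to("cuda" if torch.cuda.is_available() else "cpu")

init_image = PIL.Image.open("input.png").convert("RGB")
for strength in (0.1, 0.4, 0.7, 1.0):
    # Larger strength -> noise injected at a later timestep -> less of the input survives.
    images, latent_timestep = pipe(init_image, strength=strength, return_dict=False)
    images[0].save(f"denoised_strength_{strength}_t{latent_timestep}.png")
```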
v0.24.0/dps_pipeline.py ADDED
@@ -0,0 +1,466 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from math import pi
17
+ from typing import Callable, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+ from PIL import Image
22
+
23
+ from diffusers import DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DModel
24
+ from diffusers.utils.torch_utils import randn_tensor
25
+
26
+
27
+ class DPSPipeline(DiffusionPipeline):
28
+ r"""
29
+ Pipeline for Diffusion Posterior Sampling.
30
+
31
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
32
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
33
+
34
+ Parameters:
35
+ unet ([`UNet2DModel`]):
36
+ A `UNet2DModel` to denoise the encoded image latents.
37
+ scheduler ([`SchedulerMixin`]):
38
+ A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
39
+ [`DDPMScheduler`], or [`DDIMScheduler`].
40
+ """
41
+
42
+ model_cpu_offload_seq = "unet"
43
+
44
+ def __init__(self, unet, scheduler):
45
+ super().__init__()
46
+ self.register_modules(unet=unet, scheduler=scheduler)
47
+
48
+ @torch.no_grad()
49
+ def __call__(
50
+ self,
51
+ measurement: torch.Tensor,
52
+ operator: torch.nn.Module,
53
+ loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
54
+ batch_size: int = 1,
55
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
56
+ num_inference_steps: int = 1000,
57
+ output_type: Optional[str] = "pil",
58
+ return_dict: bool = True,
59
+ zeta: float = 0.3,
60
+ ) -> Union[ImagePipelineOutput, Tuple]:
61
+ r"""
62
+ The call function to the pipeline for generation.
63
+
64
+ Args:
65
+ measurement (`torch.Tensor`, *required*):
66
+ A 'torch.Tensor', the corrupted image
67
+ operator (`torch.nn.Module`, *required*):
68
+ A 'torch.nn.Module', the operator generating the corrupted image
69
+ loss_fn (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *required*):
70
+ A 'Callable[[torch.Tensor, torch.Tensor], torch.Tensor]', the loss function used
71
+ between the measurements; for most cases, an RMSE loss works well.
72
+ batch_size (`int`, *optional*, defaults to 1):
73
+ The number of images to generate.
74
+ generator (`torch.Generator`, *optional*):
75
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
76
+ generation deterministic.
77
+ num_inference_steps (`int`, *optional*, defaults to 1000):
78
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
79
+ expense of slower inference.
80
+ output_type (`str`, *optional*, defaults to `"pil"`):
81
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
82
+ return_dict (`bool`, *optional*, defaults to `True`):
83
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
84
+
85
+ Example:
86
+
87
+ ```py
88
+ >>> from diffusers import DDPMPipeline
89
+
90
+ >>> # load model and scheduler
91
+ >>> pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
92
+
93
+ >>> # run pipeline in inference (sample random noise and denoise)
94
+ >>> image = pipe().images[0]
95
+
96
+ >>> # save image
97
+ >>> image.save("ddpm_generated_image.png")
98
+ ```
99
+
100
+ Returns:
101
+ [`~pipelines.ImagePipelineOutput`] or `tuple`:
102
+ If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
103
+ returned where the first element is a list with the generated images
104
+ """
105
+ # Sample gaussian noise to begin loop
106
+ if isinstance(self.unet.config.sample_size, int):
107
+ image_shape = (
108
+ batch_size,
109
+ self.unet.config.in_channels,
110
+ self.unet.config.sample_size,
111
+ self.unet.config.sample_size,
112
+ )
113
+ else:
114
+ image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
115
+
116
+ if self.device.type == "mps":
117
+ # randn does not work reproducibly on mps
118
+ image = randn_tensor(image_shape, generator=generator)
119
+ image = image.to(self.device)
120
+ else:
121
+ image = randn_tensor(image_shape, generator=generator, device=self.device)
122
+
123
+ # set step values
124
+ self.scheduler.set_timesteps(num_inference_steps)
125
+
126
+ for t in self.progress_bar(self.scheduler.timesteps):
127
+ with torch.enable_grad():
128
+ # 1. predict noise model_output
129
+ image = image.requires_grad_()
130
+ model_output = self.unet(image, t).sample
131
+
132
+ # 2. compute previous image x'_{t-1} and original prediction x0_{t}
133
+ scheduler_out = self.scheduler.step(model_output, t, image, generator=generator)
134
+ image_pred, origi_pred = scheduler_out.prev_sample, scheduler_out.pred_original_sample
135
+
136
+ # 3. compute y'_t = f(x0_{t})
137
+ measurement_pred = operator(origi_pred)
138
+
139
+ # 4. compute loss = d(y, y'_t-1)
140
+ loss = loss_fn(measurement, measurement_pred)
141
+ loss.backward()
142
+
143
+ print("distance: {0:.4f}".format(loss.item()))
144
+
145
+ with torch.no_grad():
146
+ image_pred = image_pred - zeta * image.grad
147
+ image = image_pred.detach()
148
+
149
+ image = (image / 2 + 0.5).clamp(0, 1)
150
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
151
+ if output_type == "pil":
152
+ image = self.numpy_to_pil(image)
153
+
154
+ if not return_dict:
155
+ return (image,)
156
+
157
+ return ImagePipelineOutput(images=image)
158
+
159
+
160
+ if __name__ == "__main__":
161
+ import scipy
162
+ from torch import nn
163
+ from torchvision.utils import save_image
164
+
165
+ # defining the operators f(.) of y = f(x)
166
+ # super-resolution operator
167
+ class SuperResolutionOperator(nn.Module):
168
+ def __init__(self, in_shape, scale_factor):
169
+ super().__init__()
170
+
171
+ # Resizer local class, do not use outside the SR operator class
172
+ class Resizer(nn.Module):
173
+ def __init__(self, in_shape, scale_factor=None, output_shape=None, kernel=None, antialiasing=True):
174
+ super(Resizer, self).__init__()
175
+
176
+ # First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa
177
+ scale_factor, output_shape = self.fix_scale_and_size(in_shape, output_shape, scale_factor)
178
+
179
+ # Choose interpolation method, each method has the matching kernel size
180
+ def cubic(x):
181
+ absx = np.abs(x)
182
+ absx2 = absx**2
183
+ absx3 = absx**3
184
+ return (1.5 * absx3 - 2.5 * absx2 + 1) * (absx <= 1) + (
185
+ -0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2
186
+ ) * ((1 < absx) & (absx <= 2))
187
+
188
+ def lanczos2(x):
189
+ return (
190
+ (np.sin(pi * x) * np.sin(pi * x / 2) + np.finfo(np.float32).eps)
191
+ / ((pi**2 * x**2 / 2) + np.finfo(np.float32).eps)
192
+ ) * (abs(x) < 2)
193
+
194
+ def box(x):
195
+ return ((-0.5 <= x) & (x < 0.5)) * 1.0
196
+
197
+ def lanczos3(x):
198
+ return (
199
+ (np.sin(pi * x) * np.sin(pi * x / 3) + np.finfo(np.float32).eps)
200
+ / ((pi**2 * x**2 / 3) + np.finfo(np.float32).eps)
201
+ ) * (abs(x) < 3)
202
+
203
+ def linear(x):
204
+ return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
205
+
206
+ method, kernel_width = {
207
+ "cubic": (cubic, 4.0),
208
+ "lanczos2": (lanczos2, 4.0),
209
+ "lanczos3": (lanczos3, 6.0),
210
+ "box": (box, 1.0),
211
+ "linear": (linear, 2.0),
212
+ None: (cubic, 4.0), # set default interpolation method as cubic
213
+ }.get(kernel)
214
+
215
+ # Antialiasing is only used when downscaling
216
+ antialiasing *= np.any(np.array(scale_factor) < 1)
217
+
218
+ # Sort indices of dimensions according to scale of each dimension. since we are going dim by dim this is efficient
219
+ sorted_dims = np.argsort(np.array(scale_factor))
220
+ self.sorted_dims = [int(dim) for dim in sorted_dims if scale_factor[dim] != 1]
221
+
222
+ # Iterate over dimensions to calculate local weights for resizing and resize each time in one direction
223
+ field_of_view_list = []
224
+ weights_list = []
225
+ for dim in self.sorted_dims:
226
+ # for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the
227
+ # weights that multiply the values there to get its result.
228
+ weights, field_of_view = self.contributions(
229
+ in_shape[dim], output_shape[dim], scale_factor[dim], method, kernel_width, antialiasing
230
+ )
231
+
232
+ # convert to torch tensor
233
+ weights = torch.tensor(weights.T, dtype=torch.float32)
234
+
235
+ # We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for
236
+ # tmp_im[field_of_view.T], (bsxfun style)
237
+ weights_list.append(
238
+ nn.Parameter(
239
+ torch.reshape(weights, list(weights.shape) + (len(scale_factor) - 1) * [1]),
240
+ requires_grad=False,
241
+ )
242
+ )
243
+ field_of_view_list.append(
244
+ nn.Parameter(
245
+ torch.tensor(field_of_view.T.astype(np.int32), dtype=torch.long), requires_grad=False
246
+ )
247
+ )
248
+
249
+ self.field_of_view = nn.ParameterList(field_of_view_list)
250
+ self.weights = nn.ParameterList(weights_list)
251
+
252
+ def forward(self, in_tensor):
253
+ x = in_tensor
254
+
255
+ # Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim
256
+ for dim, fov, w in zip(self.sorted_dims, self.field_of_view, self.weights):
257
+ # To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize
258
+ x = torch.transpose(x, dim, 0)
259
+
260
+ # This is a bit of a complicated multiplication: x[field_of_view.T] is a tensor of order image_dims+1.
261
+ # for each pixel in the output-image it matches the positions the influence it from the input image (along 1 dim
262
+ # only, this is why it only adds 1 dim to the shape). We then multiply, for each pixel, its set of positions with
263
+ # the matching set of weights. we do this by this big tensor element-wise multiplication (MATLAB bsxfun style:
264
+ # matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the
265
+ # same number)
266
+ x = torch.sum(x[fov] * w, dim=0)
267
+
268
+ # Finally we swap back the axes to the original order
269
+ x = torch.transpose(x, dim, 0)
270
+
271
+ return x
272
+
273
+ def fix_scale_and_size(self, input_shape, output_shape, scale_factor):
274
+ # First fix the scale-factor (if given) to the standardized form the function expects (a list of scale factors in the
275
+ # same size as the number of input dimensions)
276
+ if scale_factor is not None:
277
+ # By default, if scale-factor is a scalar we assume 2d resizing and duplicate it.
278
+ if np.isscalar(scale_factor) and len(input_shape) > 1:
279
+ scale_factor = [scale_factor, scale_factor]
280
+
281
+ # We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales
282
+ scale_factor = list(scale_factor)
283
+ scale_factor = [1] * (len(input_shape) - len(scale_factor)) + scale_factor
284
+
285
+ # Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size
286
+ # to all the unspecified dimensions
287
+ if output_shape is not None:
288
+ output_shape = list(input_shape[len(output_shape) :]) + list(np.uint(np.array(output_shape)))
289
+
290
+ # Dealing with the case of a non-given scale-factor, calculating according to output-shape. Note that this is
291
+ # sub-optimal, because there can be different scales to the same output-shape.
292
+ if scale_factor is None:
293
+ scale_factor = 1.0 * np.array(output_shape) / np.array(input_shape)
294
+
295
+ # Dealing with missing output-shape. calculating according to scale-factor
296
+ if output_shape is None:
297
+ output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor)))
298
+
299
+ return scale_factor, output_shape
300
+
301
+ def contributions(self, in_length, out_length, scale, kernel, kernel_width, antialiasing):
302
+ # This function calculates a set of 'filters' and a set of field_of_view that will later on be applied
303
+ # such that each position from the field_of_view will be multiplied with a matching filter from the
304
+ # 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers
305
+ # around it. This is only done for one dimension of the image.
306
+
307
+ # When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of
308
+ # 1/sf. This makes the filtering more of a 'low-pass filter'.
309
+ fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing else kernel
310
+ kernel_width *= 1.0 / scale if antialiasing else 1.0
311
+
312
+ # These are the coordinates of the output image
313
+ out_coordinates = np.arange(1, out_length + 1)
314
+
315
+ # since both scale-factor and output size can be provided simultaneously, preserving the center of the image requires shifting
316
+ # the output coordinates. The deviation arises because out_length doesn't necessarily equal in_length*scale.
317
+ # to keep the center we need to subtract half of this deviation so that we get equal margins on both sides and the center is preserved.
318
+ shifted_out_coordinates = out_coordinates - (out_length - in_length * scale) / 2
319
+
320
+ # These are the matching positions of the output-coordinates on the input image coordinates.
321
+ # Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels:
322
+ # [1,2,3,4] -> [1,2]. Remember each pixel number is the middle of the pixel.
323
+ # The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to
324
+ # the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big
325
+ # one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor).
326
+ # So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is
327
+ # at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means:
328
+ # (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf)
329
+ match_coordinates = shifted_out_coordinates / scale + 0.5 * (1 - 1 / scale)
330
+
331
+ # This is the left boundary to start multiplying the filter from, it depends on the size of the filter
332
+ left_boundary = np.floor(match_coordinates - kernel_width / 2)
333
+
334
+ # Kernel width needs to be enlarged because when the covered range has sub-pixel borders, it must 'see' the pixel centers
335
+ # of the pixels it only covered a part from. So we add one pixel at each side to consider (weights can zeroize them)
336
+ expanded_kernel_width = np.ceil(kernel_width) + 2
337
+
338
+ # Determine a set of field_of_view for each output position, these are the pixels in the input image
339
+ # that the pixel in the output image 'sees'. We get a matrix whose horizontal dim is the output pixels (big) and the
340
+ # vertical dim is the pixels it 'sees' (kernel_size + 2)
341
+ field_of_view = np.squeeze(
342
+ np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1)
343
+ )
344
+
345
+ # Assign weight to each pixel in the field of view. A matrix whose horizontal dim is the output pixels and the
346
+ # vertical dim is a list of weights matching to the pixel in the field of view (that are specified in
347
+ # 'field_of_view')
348
+ weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)
349
+
350
+ # Normalize weights to sum up to 1. Be careful not to divide by 0
351
+ sum_weights = np.sum(weights, axis=1)
352
+ sum_weights[sum_weights == 0] = 1.0
353
+ weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1)
354
+
355
+ # We use this mirror structure as a trick for reflection padding at the boundaries
356
+ mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))))
357
+ field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])]
358
+
359
+ # Get rid of weights and pixel positions that are of zero weight
360
+ non_zero_out_pixels = np.nonzero(np.any(weights, axis=0))
361
+ weights = np.squeeze(weights[:, non_zero_out_pixels])
362
+ field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels])
363
+
364
+ # Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size
365
+ return weights, field_of_view
366
+
367
+ self.down_sample = Resizer(in_shape, 1 / scale_factor)
368
+ for param in self.parameters():
369
+ param.requires_grad = False
370
+
371
+ def forward(self, data, **kwargs):
372
+ return self.down_sample(data)
373
+
374
+ # Gaussian blurring operator
375
+ class GaussialBlurOperator(nn.Module):
376
+ def __init__(self, kernel_size, intensity):
377
+ super().__init__()
378
+
379
+ class Blurkernel(nn.Module):
380
+ def __init__(self, blur_type="gaussian", kernel_size=31, std=3.0):
381
+ super().__init__()
382
+ self.blur_type = blur_type
383
+ self.kernel_size = kernel_size
384
+ self.std = std
385
+ self.seq = nn.Sequential(
386
+ nn.ReflectionPad2d(self.kernel_size // 2),
387
+ nn.Conv2d(3, 3, self.kernel_size, stride=1, padding=0, bias=False, groups=3),
388
+ )
389
+ self.weights_init()
390
+
391
+ def forward(self, x):
392
+ return self.seq(x)
393
+
394
+ def weights_init(self):
395
+ if self.blur_type == "gaussian":
396
+ n = np.zeros((self.kernel_size, self.kernel_size))
397
+ n[self.kernel_size // 2, self.kernel_size // 2] = 1
398
+ k = scipy.ndimage.gaussian_filter(n, sigma=self.std)
399
+ k = torch.from_numpy(k)
400
+ self.k = k
401
+ for name, f in self.named_parameters():
402
+ f.data.copy_(k)
403
+
404
+ def update_weights(self, k):
405
+ if not torch.is_tensor(k):
406
+ k = torch.from_numpy(k)
407
+ for name, f in self.named_parameters():
408
+ f.data.copy_(k)
409
+
410
+ def get_kernel(self):
411
+ return self.k
412
+
413
+ self.kernel_size = kernel_size
414
+ self.conv = Blurkernel(blur_type="gaussian", kernel_size=kernel_size, std=intensity)
415
+ self.kernel = self.conv.get_kernel()
416
+ self.conv.update_weights(self.kernel.type(torch.float32))
417
+
418
+ for param in self.parameters():
419
+ param.requires_grad = False
420
+
421
+ def forward(self, data, **kwargs):
422
+ return self.conv(data)
423
+
424
+ def transpose(self, data, **kwargs):
425
+ return data
426
+
427
+ def get_kernel(self):
428
+ return self.kernel.view(1, 1, self.kernel_size, self.kernel_size)
429
+
430
+ # assuming the forward process y = f(x) is polluted by Gaussian noise, use l2 norm
431
+ def RMSELoss(yhat, y):
432
+ return torch.sqrt(torch.sum((yhat - y) ** 2))
433
+
434
+ # set up source image
435
+ src = Image.open("sample.png")
436
+ # read image into [1,3,H,W]
437
+ src = torch.from_numpy(np.array(src, dtype=np.float32)).permute(2, 0, 1)[None]
438
+ # normalize image to [-1,1]
439
+ src = (src / 127.5) - 1.0
440
+ src = src.to("cuda")
441
+
442
+ # set up operator and measurement
443
+ # operator = SuperResolutionOperator(in_shape=src.shape, scale_factor=4).to("cuda")
444
+ operator = GaussialBlurOperator(kernel_size=61, intensity=3.0).to("cuda")
445
+ measurement = operator(src)
446
+
447
+ # set up scheduler
448
+ scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256")
449
+ scheduler.set_timesteps(1000)
450
+
451
+ # set up model
452
+ model = UNet2DModel.from_pretrained("google/ddpm-celebahq-256").to("cuda")
453
+
454
+ save_image((src + 1.0) / 2.0, "dps_src.png")
455
+ save_image((measurement + 1.0) / 2.0, "dps_mea.png")
456
+
457
+ # finally, the pipeline
458
+ dpspipe = DPSPipeline(model, scheduler)
459
+ image = dpspipe(
460
+ measurement=measurement,
461
+ operator=operator,
462
+ loss_fn=RMSELoss,
463
+ zeta=1.0,
464
+ ).images[0]
465
+
466
+ image.save("dps_generated_image.png")
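For orientation, the gradient-corrected update inside `__call__` above is the simplified Diffusion Posterior Sampling step: the plain reverse-diffusion sample $x'_{t-1}$ is nudged against the gradient of the measurement error evaluated at the predicted clean sample $\hat{x}_0(x_t)$,

$$x_{t-1} = x'_{t-1} - \zeta \, \nabla_{x_t} \, \mathcal{D}\left(y,\ f(\hat{x}_0(x_t))\right),$$

where $f$ is `operator`, $\mathcal{D}$ is `loss_fn` (RMSE in the demo above), and $\zeta$ is the `zeta` step size. This is exactly the `image_pred - zeta * image.grad` line; larger `zeta` pushes harder toward measurement consistency at some cost to sample quality.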
v0.24.0/edict_pipeline.py ADDED
@@ -0,0 +1,264 @@
1
+ from typing import Optional
2
+
3
+ import torch
4
+ from PIL import Image
5
+ from tqdm.auto import tqdm
6
+ from transformers import CLIPTextModel, CLIPTokenizer
7
+
8
+ from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, UNet2DConditionModel
9
+ from diffusers.image_processor import VaeImageProcessor
10
+ from diffusers.utils import (
11
+ deprecate,
12
+ )
13
+
14
+
15
+ class EDICTPipeline(DiffusionPipeline):
16
+ def __init__(
17
+ self,
18
+ vae: AutoencoderKL,
19
+ text_encoder: CLIPTextModel,
20
+ tokenizer: CLIPTokenizer,
21
+ unet: UNet2DConditionModel,
22
+ scheduler: DDIMScheduler,
23
+ mixing_coeff: float = 0.93,
24
+ leapfrog_steps: bool = True,
25
+ ):
26
+ self.mixing_coeff = mixing_coeff
27
+ self.leapfrog_steps = leapfrog_steps
28
+
29
+ super().__init__()
30
+ self.register_modules(
31
+ vae=vae,
32
+ text_encoder=text_encoder,
33
+ tokenizer=tokenizer,
34
+ unet=unet,
35
+ scheduler=scheduler,
36
+ )
37
+
38
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
39
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
40
+
41
+ def _encode_prompt(
42
+ self, prompt: str, negative_prompt: Optional[str] = None, do_classifier_free_guidance: bool = False
43
+ ):
44
+ text_inputs = self.tokenizer(
45
+ prompt,
46
+ padding="max_length",
47
+ max_length=self.tokenizer.model_max_length,
48
+ truncation=True,
49
+ return_tensors="pt",
50
+ )
51
+
52
+ prompt_embeds = self.text_encoder(text_inputs.input_ids.to(self.device)).last_hidden_state
53
+
54
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=self.device)
55
+
56
+ if do_classifier_free_guidance:
57
+ uncond_tokens = "" if negative_prompt is None else negative_prompt
58
+
59
+ uncond_input = self.tokenizer(
60
+ uncond_tokens,
61
+ padding="max_length",
62
+ max_length=self.tokenizer.model_max_length,
63
+ truncation=True,
64
+ return_tensors="pt",
65
+ )
66
+
67
+ negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device)).last_hidden_state
68
+
69
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
70
+
71
+ return prompt_embeds
72
+
73
+ def denoise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
74
+ x = self.mixing_coeff * x + (1 - self.mixing_coeff) * y
75
+ y = self.mixing_coeff * y + (1 - self.mixing_coeff) * x
76
+
77
+ return [x, y]
78
+
79
+ def noise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
80
+ y = (y - (1 - self.mixing_coeff) * x) / self.mixing_coeff
81
+ x = (x - (1 - self.mixing_coeff) * y) / self.mixing_coeff
82
+
83
+ return [x, y]
84
+
85
+ def _get_alpha_and_beta(self, t: torch.Tensor):
86
+ # as self.alphas_cumprod is always in cpu
87
+ t = int(t)
88
+
89
+ alpha_prod = self.scheduler.alphas_cumprod[t] if t >= 0 else self.scheduler.final_alpha_cumprod
90
+
91
+ return alpha_prod, 1 - alpha_prod
92
+
93
+ def noise_step(
94
+ self,
95
+ base: torch.Tensor,
96
+ model_input: torch.Tensor,
97
+ model_output: torch.Tensor,
98
+ timestep: torch.Tensor,
99
+ ):
100
+ prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
101
+
102
+ alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
103
+ alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
104
+
105
+ a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
106
+ b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
107
+
108
+ next_model_input = (base - b_t * model_output) / a_t
109
+
110
+ return model_input, next_model_input.to(base.dtype)
111
+
112
+ def denoise_step(
113
+ self,
114
+ base: torch.Tensor,
115
+ model_input: torch.Tensor,
116
+ model_output: torch.Tensor,
117
+ timestep: torch.Tensor,
118
+ ):
119
+ prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
120
+
121
+ alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
122
+ alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
123
+
124
+ a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
125
+ b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
126
+ next_model_input = a_t * base + b_t * model_output
127
+
128
+ return model_input, next_model_input.to(base.dtype)
129
+
130
+ @torch.no_grad()
131
+ def decode_latents(self, latents: torch.Tensor):
132
+ latents = 1 / self.vae.config.scaling_factor * latents
133
+ image = self.vae.decode(latents).sample
134
+ image = (image / 2 + 0.5).clamp(0, 1)
135
+ return image
136
+
137
+ @torch.no_grad()
138
+ def prepare_latents(
139
+ self,
140
+ image: Image.Image,
141
+ text_embeds: torch.Tensor,
142
+ timesteps: torch.Tensor,
143
+ guidance_scale: float,
144
+ generator: Optional[torch.Generator] = None,
145
+ ):
146
+ do_classifier_free_guidance = guidance_scale > 1.0
147
+
148
+ image = image.to(device=self.device, dtype=text_embeds.dtype)
149
+ latent = self.vae.encode(image).latent_dist.sample(generator)
150
+
151
+ latent = self.vae.config.scaling_factor * latent
152
+
153
+ coupled_latents = [latent.clone(), latent.clone()]
154
+
155
+ for i, t in tqdm(enumerate(timesteps), total=len(timesteps)):
156
+ coupled_latents = self.noise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
157
+
158
+ # j - model_input index, k - base index
159
+ for j in range(2):
160
+ k = j ^ 1
161
+
162
+ if self.leapfrog_steps:
163
+ if i % 2 == 0:
164
+ k, j = j, k
165
+
166
+ model_input = coupled_latents[j]
167
+ base = coupled_latents[k]
168
+
169
+ latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
170
+
171
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeds).sample
172
+
173
+ if do_classifier_free_guidance:
174
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
175
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
176
+
177
+ base, model_input = self.noise_step(
178
+ base=base,
179
+ model_input=model_input,
180
+ model_output=noise_pred,
181
+ timestep=t,
182
+ )
183
+
184
+ coupled_latents[k] = model_input
185
+
186
+ return coupled_latents
187
+
188
+ @torch.no_grad()
189
+ def __call__(
190
+ self,
191
+ base_prompt: str,
192
+ target_prompt: str,
193
+ image: Image.Image,
194
+ guidance_scale: float = 3.0,
195
+ num_inference_steps: int = 50,
196
+ strength: float = 0.8,
197
+ negative_prompt: Optional[str] = None,
198
+ generator: Optional[torch.Generator] = None,
199
+ output_type: Optional[str] = "pil",
200
+ ):
201
+ do_classifier_free_guidance = guidance_scale > 1.0
202
+
203
+ image = self.image_processor.preprocess(image)
204
+
205
+ base_embeds = self._encode_prompt(base_prompt, negative_prompt, do_classifier_free_guidance)
206
+ target_embeds = self._encode_prompt(target_prompt, negative_prompt, do_classifier_free_guidance)
207
+
208
+ self.scheduler.set_timesteps(num_inference_steps, self.device)
209
+
210
+ t_limit = num_inference_steps - int(num_inference_steps * strength)
211
+ fwd_timesteps = self.scheduler.timesteps[t_limit:]
212
+ bwd_timesteps = fwd_timesteps.flip(0)
213
+
214
+ coupled_latents = self.prepare_latents(image, base_embeds, bwd_timesteps, guidance_scale, generator)
215
+
216
+ for i, t in tqdm(enumerate(fwd_timesteps), total=len(fwd_timesteps)):
217
+ # j - model_input index, k - base index
218
+ for k in range(2):
219
+ j = k ^ 1
220
+
221
+ if self.leapfrog_steps:
222
+ if i % 2 == 1:
223
+ k, j = j, k
224
+
225
+ model_input = coupled_latents[j]
226
+ base = coupled_latents[k]
227
+
228
+ latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
229
+
230
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=target_embeds).sample
231
+
232
+ if do_classifier_free_guidance:
233
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
234
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
235
+
236
+ base, model_input = self.denoise_step(
237
+ base=base,
238
+ model_input=model_input,
239
+ model_output=noise_pred,
240
+ timestep=t,
241
+ )
242
+
243
+ coupled_latents[k] = model_input
244
+
245
+ coupled_latents = self.denoise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
246
+
247
+ # either one is fine
248
+ final_latent = coupled_latents[0]
249
+
250
+ if output_type not in ["latent", "pt", "np", "pil"]:
251
+ deprecation_message = (
252
+ f"the output_type {output_type} is outdated. Please make sure to set it to one of these instead: "
253
+ "`pil`, `np`, `pt`, `latent`"
254
+ )
255
+ deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
256
+ output_type = "np"
257
+
258
+ if output_type == "latent":
259
+ image = final_latent
260
+ else:
261
+ image = self.decode_latents(final_latent)
262
+ image = self.image_processor.postprocess(image, output_type=output_type)
263
+
264
+ return image
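A hedged usage sketch for the `EDICTPipeline` defined above, assuming the class is importable in the current namespace and wired from standard Stable Diffusion components. The model id, image path and prompts are illustrative assumptions, and the `set_alpha_to_one=False` scheduler setting is an assumption chosen to match the `final_alpha_cumprod` lookup in `_get_alpha_and_beta`.

```python
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, UNet2DConditionModel

# Placeholder model id, image path and prompts, for illustration only.
model_id = "CompVis/stable-diffusion-v1-4"
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = EDICTPipeline(
    vae=AutoencoderKL.from_pretrained(model_id, subfolder="vae"),
    text_encoder=CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder"),
    tokenizer=CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer"),
    unet=UNet2DConditionModel.from_pretrained(model_id, subfolder="unet"),
    scheduler=DDIMScheduler.from_pretrained(model_id, subfolder="scheduler", set_alpha_to_one=False),
).to(device)

image = Image.open("dog.png").convert("RGB").resize((512, 512))
edited = pipe(
    base_prompt="a photo of a dog",                 # describes the input image
    target_prompt="a photo of a golden retriever",  # describes the desired edit
    image=image,
    guidance_scale=3.0,
    strength=0.8,
)[0]
edited.save("edited_dog.png")
```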
v0.24.0/iadb.py ADDED
@@ -0,0 +1,149 @@
1
+ from typing import List, Optional, Tuple, Union
2
+
3
+ import torch
4
+
5
+ from diffusers import DiffusionPipeline
6
+ from diffusers.configuration_utils import ConfigMixin
7
+ from diffusers.pipelines.pipeline_utils import ImagePipelineOutput
8
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
9
+
10
+
11
+ class IADBScheduler(SchedulerMixin, ConfigMixin):
12
+ """
13
+ IADBScheduler is a scheduler for the Iterative α-(de)Blending denoising method. It is simple and minimalist.
14
+
15
+ For more details, see the original paper: https://arxiv.org/abs/2305.03486 and the blog post: https://ggx-research.github.io/publication/2023/05/10/publication-iadb.html
16
+ """
17
+
18
+ def step(
19
+ self,
20
+ model_output: torch.FloatTensor,
21
+ timestep: int,
22
+ x_alpha: torch.FloatTensor,
23
+ ) -> torch.FloatTensor:
24
+ """
25
+ Predict the sample at the previous timestep by reversing the ODE. Core function to propagate the diffusion
26
+ process from the learned model outputs (most often the predicted noise).
27
+
28
+ Args:
29
+ model_output (`torch.FloatTensor`): direct output from learned diffusion model. It is the direction from x0 to x1.
30
+ timestep (`float`): current timestep in the diffusion chain.
31
+ x_alpha (`torch.FloatTensor`): x_alpha sample for the current timestep
32
+
33
+ Returns:
34
+ `torch.FloatTensor`: the sample at the previous timestep
35
+
36
+ """
37
+ if self.num_inference_steps is None:
38
+ raise ValueError(
39
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
40
+ )
41
+
42
+ alpha = timestep / self.num_inference_steps
43
+ alpha_next = (timestep + 1) / self.num_inference_steps
44
+
45
+ d = model_output
46
+
47
+ x_alpha = x_alpha + (alpha_next - alpha) * d
48
+
49
+ return x_alpha
50
+
51
+ def set_timesteps(self, num_inference_steps: int):
52
+ self.num_inference_steps = num_inference_steps
53
+
54
+ def add_noise(
55
+ self,
56
+ original_samples: torch.FloatTensor,
57
+ noise: torch.FloatTensor,
58
+ alpha: torch.FloatTensor,
59
+ ) -> torch.FloatTensor:
60
+ return original_samples * alpha + noise * (1 - alpha)
61
+
62
+ def __len__(self):
63
+ return self.config.num_train_timesteps
64
+
65
+
66
+ class IADBPipeline(DiffusionPipeline):
67
+ r"""
68
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
69
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
70
+
71
+ Parameters:
72
+ unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
73
+ scheduler ([`SchedulerMixin`]):
74
+ A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
75
+ [`DDPMScheduler`], or [`DDIMScheduler`].
76
+ """
77
+
78
+ def __init__(self, unet, scheduler):
79
+ super().__init__()
80
+
81
+ self.register_modules(unet=unet, scheduler=scheduler)
82
+
83
+ @torch.no_grad()
84
+ def __call__(
85
+ self,
86
+ batch_size: int = 1,
87
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
88
+ num_inference_steps: int = 50,
89
+ output_type: Optional[str] = "pil",
90
+ return_dict: bool = True,
91
+ ) -> Union[ImagePipelineOutput, Tuple]:
92
+ r"""
93
+ Args:
94
+ batch_size (`int`, *optional*, defaults to 1):
95
+ The number of images to generate.
96
+ num_inference_steps (`int`, *optional*, defaults to 50):
97
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
98
+ expense of slower inference.
99
+ output_type (`str`, *optional*, defaults to `"pil"`):
100
+ The output format of the generate image. Choose between
101
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
102
+ return_dict (`bool`, *optional*, defaults to `True`):
103
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
104
+
105
+ Returns:
106
+ [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
107
+ True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
108
+ """
109
+
110
+ # Sample gaussian noise to begin loop
111
+ if isinstance(self.unet.config.sample_size, int):
112
+ image_shape = (
113
+ batch_size,
114
+ self.unet.config.in_channels,
115
+ self.unet.config.sample_size,
116
+ self.unet.config.sample_size,
117
+ )
118
+ else:
119
+ image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
120
+
121
+ if isinstance(generator, list) and len(generator) != batch_size:
122
+ raise ValueError(
123
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
124
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
125
+ )
126
+
127
+ image = torch.randn(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
128
+
129
+ # set step values
130
+ self.scheduler.set_timesteps(num_inference_steps)
131
+ x_alpha = image.clone()
132
+ for t in self.progress_bar(range(num_inference_steps)):
133
+ alpha = t / num_inference_steps
134
+
135
+ # 1. predict noise model_output
136
+ model_output = self.unet(x_alpha, torch.tensor(alpha, device=x_alpha.device)).sample
137
+
138
+ # 2. step
139
+ x_alpha = self.scheduler.step(model_output, t, x_alpha)
140
+
141
+ image = (x_alpha * 0.5 + 0.5).clamp(0, 1)
142
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
143
+ if output_type == "pil":
144
+ image = self.numpy_to_pil(image)
145
+
146
+ if not return_dict:
147
+ return (image,)
148
+
149
+ return ImagePipelineOutput(images=image)
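In the α-(de)blending scheme above, `add_noise` forms `x_alpha = alpha * x1 + (1 - alpha) * x0` (with `x0` pure noise and `x1` the clean sample), and `step` integrates `x_alpha_next = x_alpha + (alpha_next - alpha) * d`, where the UNet predicts the direction `d ≈ x1 - x0`. A hedged sampling sketch using the `IADBScheduler` and `IADBPipeline` classes defined above; the checkpoint id is a placeholder, since IADB needs a `UNet2DModel` trained to predict that direction rather than ε-noise.

```python
import torch
from diffusers import UNet2DModel

# Placeholder checkpoint id: IADB expects a UNet2DModel trained to predict the
# blending direction d = x1 - x0 (clean image minus noise), not epsilon-noise.
unet = UNet2DModel.from_pretrained("your-namespace/iadb-butterflies-128")
pipe = IADBPipeline(unet=unet, scheduler=IADBScheduler()).to(
    "cuda" if torch.cuda.is_available() else "cpu"
)

images = pipe(batch_size=4, num_inference_steps=128).images
for i, img in enumerate(images):
    img.save(f"iadb_sample_{i}.png")
```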
v0.24.0/imagic_stable_diffusion.py ADDED
@@ -0,0 +1,496 @@
1
+ """
2
+ modeled after the textual_inversion.py / train_dreambooth.py and the work
3
+ of justinpinkney here: https://github.com/justinpinkney/stable-diffusion/blob/main/notebooks/imagic.ipynb
4
+ """
5
+ import inspect
6
+ import warnings
7
+ from typing import List, Optional, Union
8
+
9
+ import numpy as np
10
+ import PIL.Image
11
+ import torch
12
+ import torch.nn.functional as F
13
+ from accelerate import Accelerator
14
+
15
+ # TODO: remove and import from diffusers.utils when the new version of diffusers is released
16
+ from packaging import version
17
+ from tqdm.auto import tqdm
18
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
19
+
20
+ from diffusers import DiffusionPipeline
21
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
22
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
23
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
24
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
25
+ from diffusers.utils import logging
26
+
27
+
28
+ if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
29
+ PIL_INTERPOLATION = {
30
+ "linear": PIL.Image.Resampling.BILINEAR,
31
+ "bilinear": PIL.Image.Resampling.BILINEAR,
32
+ "bicubic": PIL.Image.Resampling.BICUBIC,
33
+ "lanczos": PIL.Image.Resampling.LANCZOS,
34
+ "nearest": PIL.Image.Resampling.NEAREST,
35
+ }
36
+ else:
37
+ PIL_INTERPOLATION = {
38
+ "linear": PIL.Image.LINEAR,
39
+ "bilinear": PIL.Image.BILINEAR,
40
+ "bicubic": PIL.Image.BICUBIC,
41
+ "lanczos": PIL.Image.LANCZOS,
42
+ "nearest": PIL.Image.NEAREST,
43
+ }
44
+ # ------------------------------------------------------------------------------
45
+
46
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
47
+
48
+
49
+ def preprocess(image):
50
+ w, h = image.size
51
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
52
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
53
+ image = np.array(image).astype(np.float32) / 255.0
54
+ image = image[None].transpose(0, 3, 1, 2)
55
+ image = torch.from_numpy(image)
56
+ return 2.0 * image - 1.0
57
+
58
+
59
+ class ImagicStableDiffusionPipeline(DiffusionPipeline):
60
+ r"""
61
+ Pipeline for imagic image editing.
62
+ See paper here: https://arxiv.org/pdf/2210.09276.pdf
63
+
64
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
65
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
66
+ Args:
67
+ vae ([`AutoencoderKL`]):
68
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
69
+ text_encoder ([`CLIPTextModel`]):
70
+ Frozen text-encoder. Stable Diffusion uses the text portion of
71
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
72
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
73
+ tokenizer (`CLIPTokenizer`):
74
+ Tokenizer of class
75
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
76
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
77
+ scheduler ([`SchedulerMixin`]):
78
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
79
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
80
+ safety_checker ([`StableDiffusionSafetyChecker`]):
81
+ Classification module that estimates whether generated images could be considered offensive or harmful.
82
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
83
+ feature_extractor ([`CLIPImageProcessor`]):
84
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
85
+ """
86
+
87
+ def __init__(
88
+ self,
89
+ vae: AutoencoderKL,
90
+ text_encoder: CLIPTextModel,
91
+ tokenizer: CLIPTokenizer,
92
+ unet: UNet2DConditionModel,
93
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
94
+ safety_checker: StableDiffusionSafetyChecker,
95
+ feature_extractor: CLIPImageProcessor,
96
+ ):
97
+ super().__init__()
98
+ self.register_modules(
99
+ vae=vae,
100
+ text_encoder=text_encoder,
101
+ tokenizer=tokenizer,
102
+ unet=unet,
103
+ scheduler=scheduler,
104
+ safety_checker=safety_checker,
105
+ feature_extractor=feature_extractor,
106
+ )
107
+
108
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
109
+ r"""
110
+ Enable sliced attention computation.
111
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
112
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
113
+ Args:
114
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
115
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
116
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
117
+ `attention_head_dim` must be a multiple of `slice_size`.
118
+ """
119
+ if slice_size == "auto":
120
+ # half the attention head size is usually a good trade-off between
121
+ # speed and memory
122
+ slice_size = self.unet.config.attention_head_dim // 2
123
+ self.unet.set_attention_slice(slice_size)
124
+
125
+ def disable_attention_slicing(self):
126
+ r"""
127
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
128
+ back to computing attention in one step.
129
+ """
130
+ # set slice_size = `None` to disable `attention slicing`
131
+ self.enable_attention_slicing(None)
132
+
133
+ def train(
134
+ self,
135
+ prompt: Union[str, List[str]],
136
+ image: Union[torch.FloatTensor, PIL.Image.Image],
137
+ height: Optional[int] = 512,
138
+ width: Optional[int] = 512,
139
+ generator: Optional[torch.Generator] = None,
140
+ embedding_learning_rate: float = 0.001,
141
+ diffusion_model_learning_rate: float = 2e-6,
142
+ text_embedding_optimization_steps: int = 500,
143
+ model_fine_tuning_optimization_steps: int = 1000,
144
+ **kwargs,
145
+ ):
146
+ r"""
147
+ Function invoked when calling the pipeline for generation.
148
+ Args:
149
+ prompt (`str` or `List[str]`):
150
+ The prompt or prompts to guide the image generation.
151
+ height (`int`, *optional*, defaults to 512):
152
+ The height in pixels of the generated image.
153
+ width (`int`, *optional*, defaults to 512):
154
+ The width in pixels of the generated image.
155
+ num_inference_steps (`int`, *optional*, defaults to 50):
156
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
157
+ expense of slower inference.
158
+ guidance_scale (`float`, *optional*, defaults to 7.5):
159
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
160
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
161
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
162
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
163
+ usually at the expense of lower image quality.
164
+ eta (`float`, *optional*, defaults to 0.0):
165
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
166
+ [`schedulers.DDIMScheduler`], will be ignored for others.
167
+ generator (`torch.Generator`, *optional*):
168
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
169
+ deterministic.
170
+ latents (`torch.FloatTensor`, *optional*):
171
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
172
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
173
+ tensor will be generated by sampling using the supplied random `generator`.
174
+ output_type (`str`, *optional*, defaults to `"pil"`):
175
+ The output format of the generated image. Choose between
176
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
177
+ return_dict (`bool`, *optional*, defaults to `True`):
178
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
179
+ plain tuple.
180
+ Returns:
181
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
182
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
183
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
184
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
185
+ (nsfw) content, according to the `safety_checker`.
186
+ """
187
+ accelerator = Accelerator(
188
+ gradient_accumulation_steps=1,
189
+ mixed_precision="fp16",
190
+ )
191
+
192
+ if "torch_device" in kwargs:
193
+ device = kwargs.pop("torch_device")
194
+ warnings.warn(
195
+ "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0."
196
+ " Consider using `pipe.to(torch_device)` instead."
197
+ )
198
+
199
+ if device is None:
200
+ device = "cuda" if torch.cuda.is_available() else "cpu"
201
+ self.to(device)
202
+
203
+ if height % 8 != 0 or width % 8 != 0:
204
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
205
+
206
+ # Freeze vae and unet
207
+ self.vae.requires_grad_(False)
208
+ self.unet.requires_grad_(False)
209
+ self.text_encoder.requires_grad_(False)
210
+ self.unet.eval()
211
+ self.vae.eval()
212
+ self.text_encoder.eval()
213
+
214
+ if accelerator.is_main_process:
215
+ accelerator.init_trackers(
216
+ "imagic",
217
+ config={
218
+ "embedding_learning_rate": embedding_learning_rate,
219
+ "text_embedding_optimization_steps": text_embedding_optimization_steps,
220
+ },
221
+ )
222
+
223
+ # get text embeddings for prompt
224
+ text_input = self.tokenizer(
225
+ prompt,
226
+ padding="max_length",
227
+ max_length=self.tokenizer.model_max_length,
228
+ truncation=True,
229
+ return_tensors="pt",
230
+ )
231
+ text_embeddings = torch.nn.Parameter(
232
+ self.text_encoder(text_input.input_ids.to(self.device))[0], requires_grad=True
233
+ )
234
+ text_embeddings = text_embeddings.detach()
235
+ text_embeddings.requires_grad_()
236
+ text_embeddings_orig = text_embeddings.clone()
237
+
238
+ # Initialize the optimizer
239
+ optimizer = torch.optim.Adam(
240
+ [text_embeddings], # only optimize the embeddings
241
+ lr=embedding_learning_rate,
242
+ )
243
+
244
+ if isinstance(image, PIL.Image.Image):
245
+ image = preprocess(image)
246
+
247
+ latents_dtype = text_embeddings.dtype
248
+ image = image.to(device=self.device, dtype=latents_dtype)
249
+ init_latent_image_dist = self.vae.encode(image).latent_dist
250
+ image_latents = init_latent_image_dist.sample(generator=generator)
251
+ image_latents = 0.18215 * image_latents
252
+
253
+ progress_bar = tqdm(range(text_embedding_optimization_steps), disable=not accelerator.is_local_main_process)
254
+ progress_bar.set_description("Steps")
255
+
256
+ global_step = 0
257
+
258
+ logger.info("First optimizing the text embedding to better reconstruct the init image")
259
+ for _ in range(text_embedding_optimization_steps):
260
+ with accelerator.accumulate(text_embeddings):
261
+ # Sample noise that we'll add to the latents
262
+ noise = torch.randn(image_latents.shape).to(image_latents.device)
263
+ timesteps = torch.randint(1000, (1,), device=image_latents.device)
264
+
265
+ # Add noise to the latents according to the noise magnitude at each timestep
266
+ # (this is the forward diffusion process)
267
+ noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)
268
+
269
+ # Predict the noise residual
270
+ noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
271
+
272
+ loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
273
+ accelerator.backward(loss)
274
+
275
+ optimizer.step()
276
+ optimizer.zero_grad()
277
+
278
+ # Checks if the accelerator has performed an optimization step behind the scenes
279
+ if accelerator.sync_gradients:
280
+ progress_bar.update(1)
281
+ global_step += 1
282
+
283
+ logs = {"loss": loss.detach().item()} # , "lr": lr_scheduler.get_last_lr()[0]}
284
+ progress_bar.set_postfix(**logs)
285
+ accelerator.log(logs, step=global_step)
286
+
287
+ accelerator.wait_for_everyone()
288
+
289
+ text_embeddings.requires_grad_(False)
290
+
291
+ # Now we fine tune the unet to better reconstruct the image
292
+ self.unet.requires_grad_(True)
293
+ self.unet.train()
294
+ optimizer = torch.optim.Adam(
295
+ self.unet.parameters(), # only optimize unet
296
+ lr=diffusion_model_learning_rate,
297
+ )
298
+ progress_bar = tqdm(range(model_fine_tuning_optimization_steps), disable=not accelerator.is_local_main_process)
299
+
300
+ logger.info("Next fine tuning the entire model to better reconstruct the init image")
301
+ for _ in range(model_fine_tuning_optimization_steps):
302
+ with accelerator.accumulate(self.unet.parameters()):
303
+ # Sample noise that we'll add to the latents
304
+ noise = torch.randn(image_latents.shape).to(image_latents.device)
305
+ timesteps = torch.randint(1000, (1,), device=image_latents.device)
306
+
307
+ # Add noise to the latents according to the noise magnitude at each timestep
308
+ # (this is the forward diffusion process)
309
+ noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)
310
+
311
+ # Predict the noise residual
312
+ noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
313
+
314
+ loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
315
+ accelerator.backward(loss)
316
+
317
+ optimizer.step()
318
+ optimizer.zero_grad()
319
+
320
+ # Checks if the accelerator has performed an optimization step behind the scenes
321
+ if accelerator.sync_gradients:
322
+ progress_bar.update(1)
323
+ global_step += 1
324
+
325
+ logs = {"loss": loss.detach().item()} # , "lr": lr_scheduler.get_last_lr()[0]}
326
+ progress_bar.set_postfix(**logs)
327
+ accelerator.log(logs, step=global_step)
328
+
329
+ accelerator.wait_for_everyone()
330
+ self.text_embeddings_orig = text_embeddings_orig
331
+ self.text_embeddings = text_embeddings
332
+
333
+ @torch.no_grad()
334
+ def __call__(
335
+ self,
336
+ alpha: float = 1.2,
337
+ height: Optional[int] = 512,
338
+ width: Optional[int] = 512,
339
+ num_inference_steps: Optional[int] = 50,
340
+ generator: Optional[torch.Generator] = None,
341
+ output_type: Optional[str] = "pil",
342
+ return_dict: bool = True,
343
+ guidance_scale: float = 7.5,
344
+ eta: float = 0.0,
345
+ ):
346
+ r"""
347
+ Function invoked when calling the pipeline for generation.
348
+ Args:
349
+ alpha (`float`, *optional*, defaults to 1.2):
350
+ The interpolation weight between the image-optimized text embedding and the original prompt embedding (`alpha * text_embeddings_orig + (1 - alpha) * text_embeddings`); higher values push the result toward the target prompt.
351
+ height (`int`, *optional*, defaults to 512):
352
+ The height in pixels of the generated image.
353
+ width (`int`, *optional*, defaults to 512):
354
+ The width in pixels of the generated image.
355
+ num_inference_steps (`int`, *optional*, defaults to 50):
356
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
357
+ expense of slower inference.
358
+ guidance_scale (`float`, *optional*, defaults to 7.5):
359
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
360
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
361
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
362
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
363
+ usually at the expense of lower image quality.
364
+ eta (`float`, *optional*, defaults to 0.0):
365
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
366
+ [`schedulers.DDIMScheduler`], will be ignored for others.
367
+ generator (`torch.Generator`, *optional*):
368
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
369
+ deterministic.
370
+ latents (`torch.FloatTensor`, *optional*):
371
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
372
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
373
+ tensor will be generated by sampling using the supplied random `generator`.
374
+ output_type (`str`, *optional*, defaults to `"pil"`):
375
+ The output format of the generated image. Choose between
376
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
377
+ return_dict (`bool`, *optional*, defaults to `True`):
378
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
379
+ plain tuple.
380
+ Returns:
381
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
382
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
383
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
384
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
385
+ (nsfw) content, according to the `safety_checker`.
386
+ """
387
+ if height % 8 != 0 or width % 8 != 0:
388
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
389
+ if self.text_embeddings is None:
390
+ raise ValueError("Please run the pipe.train() before trying to generate an image.")
391
+ if self.text_embeddings_orig is None:
392
+ raise ValueError("Please run the pipe.train() before trying to generate an image.")
393
+
394
+ text_embeddings = alpha * self.text_embeddings_orig + (1 - alpha) * self.text_embeddings
395
+
396
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
397
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
398
+ # corresponds to doing no classifier free guidance.
399
+ do_classifier_free_guidance = guidance_scale > 1.0
400
+ # get unconditional embeddings for classifier free guidance
401
+ if do_classifier_free_guidance:
402
+ uncond_tokens = [""]
403
+ max_length = self.tokenizer.model_max_length
404
+ uncond_input = self.tokenizer(
405
+ uncond_tokens,
406
+ padding="max_length",
407
+ max_length=max_length,
408
+ truncation=True,
409
+ return_tensors="pt",
410
+ )
411
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
412
+
413
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
414
+ seq_len = uncond_embeddings.shape[1]
415
+ uncond_embeddings = uncond_embeddings.view(1, seq_len, -1)
416
+
417
+ # For classifier free guidance, we need to do two forward passes.
418
+ # Here we concatenate the unconditional and text embeddings into a single batch
419
+ # to avoid doing two forward passes
420
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
421
+
422
+ # get the initial random noise unless the user supplied it
423
+
424
+ # Unlike in other pipelines, latents need to be generated in the target device
425
+ # for 1-to-1 results reproducibility with the CompVis implementation.
426
+ # However this currently doesn't work in `mps`.
427
+ latents_shape = (1, self.unet.config.in_channels, height // 8, width // 8)
428
+ latents_dtype = text_embeddings.dtype
429
+ if self.device.type == "mps":
430
+ # randn does not exist on mps
431
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
432
+ self.device
433
+ )
434
+ else:
435
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
436
+
437
+ # set timesteps
438
+ self.scheduler.set_timesteps(num_inference_steps)
439
+
440
+ # Some schedulers like PNDM have timesteps as arrays
441
+ # It's more optimized to move all timesteps to correct device beforehand
442
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
443
+
444
+ # scale the initial noise by the standard deviation required by the scheduler
445
+ latents = latents * self.scheduler.init_noise_sigma
446
+
447
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
448
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
449
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
450
+ # and should be between [0, 1]
451
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
452
+ extra_step_kwargs = {}
453
+ if accepts_eta:
454
+ extra_step_kwargs["eta"] = eta
455
+
456
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
457
+ # expand the latents if we are doing classifier free guidance
458
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
459
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
460
+
461
+ # predict the noise residual
462
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
463
+
464
+ # perform guidance
465
+ if do_classifier_free_guidance:
466
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
467
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
468
+
469
+ # compute the previous noisy sample x_t -> x_t-1
470
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
471
+
472
+ latents = 1 / 0.18215 * latents
473
+ image = self.vae.decode(latents).sample
474
+
475
+ image = (image / 2 + 0.5).clamp(0, 1)
476
+
477
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
478
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
479
+
480
+ if self.safety_checker is not None:
481
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
482
+ self.device
483
+ )
484
+ image, has_nsfw_concept = self.safety_checker(
485
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
486
+ )
487
+ else:
488
+ has_nsfw_concept = None
489
+
490
+ if output_type == "pil":
491
+ image = self.numpy_to_pil(image)
492
+
493
+ if not return_dict:
494
+ return (image, has_nsfw_concept)
495
+
496
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
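
A minimal usage sketch for the Imagic pipeline added above (not part of the commit). The checkpoint id, image path, and seed are placeholders, and loading via the `custom_pipeline` shortcut is an assumption about how this community file is consumed; the `train()` and `__call__()` arguments follow the code shown above. `train()` uses an `Accelerator` internally, so `accelerate` must be installed.

```python
import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Placeholder checkpoint id; "imagic_stable_diffusion" is assumed to resolve to the file above.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="imagic_stable_diffusion",
).to("cuda")

init_image = Image.open("dog.jpg").convert("RGB").resize((512, 512))  # placeholder input image
prompt = "a photo of a dog sitting down"

# Stage 1 optimizes the text embedding, stage 2 fine-tunes the UNet (see train() above).
pipe.train(
    prompt,
    image=init_image,
    generator=torch.Generator(device="cuda").manual_seed(0),
)

# alpha blends the image-optimized embedding with the original prompt embedding (see __call__ above).
edited = pipe(alpha=1.2, guidance_scale=7.5, num_inference_steps=50).images[0]
edited.save("imagic_edit.png")
```
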
v0.24.0/img2img_inpainting.py ADDED
@@ -0,0 +1,464 @@
1
+ import inspect
2
+ from typing import Callable, List, Optional, Tuple, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+ import torch
7
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
8
+
9
+ from diffusers import DiffusionPipeline
10
+ from diffusers.configuration_utils import FrozenDict
11
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
13
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
14
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
15
+ from diffusers.utils import deprecate, logging
16
+
17
+
18
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
19
+
20
+
21
+ def prepare_mask_and_masked_image(image, mask):
22
+ image = np.array(image.convert("RGB"))
23
+ image = image[None].transpose(0, 3, 1, 2)
24
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
25
+
26
+ mask = np.array(mask.convert("L"))
27
+ mask = mask.astype(np.float32) / 255.0
28
+ mask = mask[None, None]
29
+ mask[mask < 0.5] = 0
30
+ mask[mask >= 0.5] = 1
31
+ mask = torch.from_numpy(mask)
32
+
33
+ masked_image = image * (mask < 0.5)
34
+
35
+ return mask, masked_image
36
+
37
+
38
+ def check_size(image, height, width):
39
+ if isinstance(image, PIL.Image.Image):
40
+ w, h = image.size
41
+ elif isinstance(image, torch.Tensor):
42
+ *_, h, w = image.shape
43
+
44
+ if h != height or w != width:
45
+ raise ValueError(f"Image size should be {height}x{width}, but got {h}x{w}")
46
+
47
+
48
+ def overlay_inner_image(image, inner_image, paste_offset: Tuple[int] = (0, 0)):
49
+ inner_image = inner_image.convert("RGBA")
50
+ image = image.convert("RGB")
51
+
52
+ image.paste(inner_image, paste_offset, inner_image)
53
+ image = image.convert("RGB")
54
+
55
+ return image
56
+
57
+
58
+ class ImageToImageInpaintingPipeline(DiffusionPipeline):
59
+ r"""
60
+ Pipeline for text-guided image-to-image inpainting using Stable Diffusion. *This is an experimental feature*.
61
+
62
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
63
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
64
+
65
+ Args:
66
+ vae ([`AutoencoderKL`]):
67
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
68
+ text_encoder ([`CLIPTextModel`]):
69
+ Frozen text-encoder. Stable Diffusion uses the text portion of
70
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
71
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
72
+ tokenizer (`CLIPTokenizer`):
73
+ Tokenizer of class
74
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
75
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
76
+ scheduler ([`SchedulerMixin`]):
77
+ A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of
78
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
79
+ safety_checker ([`StableDiffusionSafetyChecker`]):
80
+ Classification module that estimates whether generated images could be considered offensive or harmful.
81
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
82
+ feature_extractor ([`CLIPImageProcessor`]):
83
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
84
+ """
85
+
86
+ def __init__(
87
+ self,
88
+ vae: AutoencoderKL,
89
+ text_encoder: CLIPTextModel,
90
+ tokenizer: CLIPTokenizer,
91
+ unet: UNet2DConditionModel,
92
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
93
+ safety_checker: StableDiffusionSafetyChecker,
94
+ feature_extractor: CLIPImageProcessor,
95
+ ):
96
+ super().__init__()
97
+
98
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
99
+ deprecation_message = (
100
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
101
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
102
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
103
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
104
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
105
+ " file"
106
+ )
107
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
108
+ new_config = dict(scheduler.config)
109
+ new_config["steps_offset"] = 1
110
+ scheduler._internal_dict = FrozenDict(new_config)
111
+
112
+ if safety_checker is None:
113
+ logger.warning(
114
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
115
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
116
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
117
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
118
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
119
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
120
+ )
121
+
122
+ self.register_modules(
123
+ vae=vae,
124
+ text_encoder=text_encoder,
125
+ tokenizer=tokenizer,
126
+ unet=unet,
127
+ scheduler=scheduler,
128
+ safety_checker=safety_checker,
129
+ feature_extractor=feature_extractor,
130
+ )
131
+
132
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
133
+ r"""
134
+ Enable sliced attention computation.
135
+
136
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
137
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
138
+
139
+ Args:
140
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
141
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
142
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
143
+ `attention_head_dim` must be a multiple of `slice_size`.
144
+ """
145
+ if slice_size == "auto":
146
+ # half the attention head size is usually a good trade-off between
147
+ # speed and memory
148
+ slice_size = self.unet.config.attention_head_dim // 2
149
+ self.unet.set_attention_slice(slice_size)
150
+
151
+ def disable_attention_slicing(self):
152
+ r"""
153
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
154
+ back to computing attention in one step.
155
+ """
156
+ # set slice_size = `None` to disable `attention slicing`
157
+ self.enable_attention_slicing(None)
158
+
159
+ @torch.no_grad()
160
+ def __call__(
161
+ self,
162
+ prompt: Union[str, List[str]],
163
+ image: Union[torch.FloatTensor, PIL.Image.Image],
164
+ inner_image: Union[torch.FloatTensor, PIL.Image.Image],
165
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image],
166
+ height: int = 512,
167
+ width: int = 512,
168
+ num_inference_steps: int = 50,
169
+ guidance_scale: float = 7.5,
170
+ negative_prompt: Optional[Union[str, List[str]]] = None,
171
+ num_images_per_prompt: Optional[int] = 1,
172
+ eta: float = 0.0,
173
+ generator: Optional[torch.Generator] = None,
174
+ latents: Optional[torch.FloatTensor] = None,
175
+ output_type: Optional[str] = "pil",
176
+ return_dict: bool = True,
177
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
178
+ callback_steps: int = 1,
179
+ **kwargs,
180
+ ):
181
+ r"""
182
+ Function invoked when calling the pipeline for generation.
183
+
184
+ Args:
185
+ prompt (`str` or `List[str]`):
186
+ The prompt or prompts to guide the image generation.
187
+ image (`torch.Tensor` or `PIL.Image.Image`):
188
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
189
+ be masked out with `mask_image` and repainted according to `prompt`.
190
+ inner_image (`torch.Tensor` or `PIL.Image.Image`):
191
+ `Image`, or tensor representing an image batch which will be overlayed onto `image`. Non-transparent
192
+ regions of `inner_image` must fit inside white pixels in `mask_image`. Expects four channels, with
193
+ the last channel representing the alpha channel, which will be used to blend `inner_image` with
194
+ `image`. If not provided, it will be forcibly cast to RGBA.
195
+ mask_image (`PIL.Image.Image`):
196
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
197
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
198
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
199
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
200
+ height (`int`, *optional*, defaults to 512):
201
+ The height in pixels of the generated image.
202
+ width (`int`, *optional*, defaults to 512):
203
+ The width in pixels of the generated image.
204
+ num_inference_steps (`int`, *optional*, defaults to 50):
205
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
206
+ expense of slower inference.
207
+ guidance_scale (`float`, *optional*, defaults to 7.5):
208
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
209
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
210
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
211
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
212
+ usually at the expense of lower image quality.
213
+ negative_prompt (`str` or `List[str]`, *optional*):
214
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
215
+ if `guidance_scale` is less than `1`).
216
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
217
+ The number of images to generate per prompt.
218
+ eta (`float`, *optional*, defaults to 0.0):
219
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
220
+ [`schedulers.DDIMScheduler`], will be ignored for others.
221
+ generator (`torch.Generator`, *optional*):
222
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
223
+ deterministic.
224
+ latents (`torch.FloatTensor`, *optional*):
225
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
226
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
227
+ tensor will ge generated by sampling using the supplied random `generator`.
228
+ output_type (`str`, *optional*, defaults to `"pil"`):
229
+ The output format of the generate image. Choose between
230
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
231
+ return_dict (`bool`, *optional*, defaults to `True`):
232
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
233
+ plain tuple.
234
+ callback (`Callable`, *optional*):
235
+ A function that will be called every `callback_steps` steps during inference. The function will be
236
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
237
+ callback_steps (`int`, *optional*, defaults to 1):
238
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
239
+ called at every step.
240
+
241
+ Returns:
242
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
243
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
244
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
245
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
246
+ (nsfw) content, according to the `safety_checker`.
247
+ """
248
+
249
+ if isinstance(prompt, str):
250
+ batch_size = 1
251
+ elif isinstance(prompt, list):
252
+ batch_size = len(prompt)
253
+ else:
254
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
255
+
256
+ if height % 8 != 0 or width % 8 != 0:
257
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
258
+
259
+ if (callback_steps is None) or (
260
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
261
+ ):
262
+ raise ValueError(
263
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
264
+ f" {type(callback_steps)}."
265
+ )
266
+
267
+ # check if input sizes are correct
268
+ check_size(image, height, width)
269
+ check_size(inner_image, height, width)
270
+ check_size(mask_image, height, width)
271
+
272
+ # get prompt text embeddings
273
+ text_inputs = self.tokenizer(
274
+ prompt,
275
+ padding="max_length",
276
+ max_length=self.tokenizer.model_max_length,
277
+ return_tensors="pt",
278
+ )
279
+ text_input_ids = text_inputs.input_ids
280
+
281
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
282
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
283
+ logger.warning(
284
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
285
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
286
+ )
287
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
288
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
289
+
290
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
291
+ bs_embed, seq_len, _ = text_embeddings.shape
292
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
293
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
294
+
295
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
296
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
297
+ # corresponds to doing no classifier free guidance.
298
+ do_classifier_free_guidance = guidance_scale > 1.0
299
+ # get unconditional embeddings for classifier free guidance
300
+ if do_classifier_free_guidance:
301
+ uncond_tokens: List[str]
302
+ if negative_prompt is None:
303
+ uncond_tokens = [""]
304
+ elif type(prompt) is not type(negative_prompt):
305
+ raise TypeError(
306
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
307
+ f" {type(prompt)}."
308
+ )
309
+ elif isinstance(negative_prompt, str):
310
+ uncond_tokens = [negative_prompt]
311
+ elif batch_size != len(negative_prompt):
312
+ raise ValueError(
313
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
314
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
315
+ " the batch size of `prompt`."
316
+ )
317
+ else:
318
+ uncond_tokens = negative_prompt
319
+
320
+ max_length = text_input_ids.shape[-1]
321
+ uncond_input = self.tokenizer(
322
+ uncond_tokens,
323
+ padding="max_length",
324
+ max_length=max_length,
325
+ truncation=True,
326
+ return_tensors="pt",
327
+ )
328
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
329
+
330
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
331
+ seq_len = uncond_embeddings.shape[1]
332
+ uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
333
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
334
+
335
+ # For classifier free guidance, we need to do two forward passes.
336
+ # Here we concatenate the unconditional and text embeddings into a single batch
337
+ # to avoid doing two forward passes
338
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
339
+
340
+ # get the initial random noise unless the user supplied it
341
+ # Unlike in other pipelines, latents need to be generated in the target device
342
+ # for 1-to-1 results reproducibility with the CompVis implementation.
343
+ # However this currently doesn't work in `mps`.
344
+ num_channels_latents = self.vae.config.latent_channels
345
+ latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8)
346
+ latents_dtype = text_embeddings.dtype
347
+ if latents is None:
348
+ if self.device.type == "mps":
349
+ # randn does not exist on mps
350
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
351
+ self.device
352
+ )
353
+ else:
354
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
355
+ else:
356
+ if latents.shape != latents_shape:
357
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
358
+ latents = latents.to(self.device)
359
+
360
+ # overlay the inner image
361
+ image = overlay_inner_image(image, inner_image)
362
+
363
+ # prepare mask and masked_image
364
+ mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
365
+ mask = mask.to(device=self.device, dtype=text_embeddings.dtype)
366
+ masked_image = masked_image.to(device=self.device, dtype=text_embeddings.dtype)
367
+
368
+ # resize the mask to latents shape as we concatenate the mask to the latents
369
+ mask = torch.nn.functional.interpolate(mask, size=(height // 8, width // 8))
370
+
371
+ # encode the mask image into latents space so we can concatenate it to the latents
372
+ masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
373
+ masked_image_latents = 0.18215 * masked_image_latents
374
+
375
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
376
+ mask = mask.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
377
+ masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
378
+
379
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
380
+ masked_image_latents = (
381
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
382
+ )
383
+
384
+ num_channels_mask = mask.shape[1]
385
+ num_channels_masked_image = masked_image_latents.shape[1]
386
+
387
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
388
+ raise ValueError(
389
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
390
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
391
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
392
+ f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
393
+ " `pipeline.unet` or your `mask_image` or `image` input."
394
+ )
395
+
396
+ # set timesteps
397
+ self.scheduler.set_timesteps(num_inference_steps)
398
+
399
+ # Some schedulers like PNDM have timesteps as arrays
400
+ # It's more optimized to move all timesteps to correct device beforehand
401
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
402
+
403
+ # scale the initial noise by the standard deviation required by the scheduler
404
+ latents = latents * self.scheduler.init_noise_sigma
405
+
406
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
407
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
408
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
409
+ # and should be between [0, 1]
410
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
411
+ extra_step_kwargs = {}
412
+ if accepts_eta:
413
+ extra_step_kwargs["eta"] = eta
414
+
415
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
416
+ # expand the latents if we are doing classifier free guidance
417
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
418
+
419
+ # concat latents, mask, masked_image_latents in the channel dimension
420
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
421
+
422
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
423
+
424
+ # predict the noise residual
425
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
426
+
427
+ # perform guidance
428
+ if do_classifier_free_guidance:
429
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
430
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
431
+
432
+ # compute the previous noisy sample x_t -> x_t-1
433
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
434
+
435
+ # call the callback, if provided
436
+ if callback is not None and i % callback_steps == 0:
437
+ step_idx = i // getattr(self.scheduler, "order", 1)
438
+ callback(step_idx, t, latents)
439
+
440
+ latents = 1 / 0.18215 * latents
441
+ image = self.vae.decode(latents).sample
442
+
443
+ image = (image / 2 + 0.5).clamp(0, 1)
444
+
445
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
446
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
447
+
448
+ if self.safety_checker is not None:
449
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
450
+ self.device
451
+ )
452
+ image, has_nsfw_concept = self.safety_checker(
453
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
454
+ )
455
+ else:
456
+ has_nsfw_concept = None
457
+
458
+ if output_type == "pil":
459
+ image = self.numpy_to_pil(image)
460
+
461
+ if not return_dict:
462
+ return (image, has_nsfw_concept)
463
+
464
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
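
A minimal usage sketch for `ImageToImageInpaintingPipeline` above (not part of the commit). The checkpoint id and file paths are placeholders, and the `custom_pipeline` shortcut is an assumption; note that the checkpoint must be an inpainting model, because the pipeline concatenates mask and masked-image latents onto the UNet input channels (see the channel check above).

```python
import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Placeholder checkpoint id; an inpainting UNet (9 input channels) is required.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="img2img_inpainting",  # assumed to resolve to img2img_inpainting.py above
    torch_dtype=torch.float16,
).to("cuda")

image = Image.open("background.png").convert("RGB").resize((512, 512))      # base image
inner_image = Image.open("overlay.png").convert("RGBA").resize((512, 512))  # alpha channel drives blending
mask_image = Image.open("mask.png").convert("L").resize((512, 512))         # white = repaint, black = keep

result = pipe(
    prompt="a mecha robot sitting on a bench",
    image=image,
    inner_image=inner_image,
    mask_image=mask_image,
    num_inference_steps=50,
    guidance_scale=7.5,
).images[0]
result.save("img2img_inpainting.png")
```
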
v0.24.0/interpolate_stable_diffusion.py ADDED
@@ -0,0 +1,525 @@
1
+ import inspect
2
+ import time
3
+ from pathlib import Path
4
+ from typing import Callable, List, Optional, Union
5
+
6
+ import numpy as np
7
+ import torch
8
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
9
+
10
+ from diffusers import DiffusionPipeline
11
+ from diffusers.configuration_utils import FrozenDict
12
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
14
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
15
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
16
+ from diffusers.utils import deprecate, logging
17
+
18
+
19
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
20
+
21
+
22
+ def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
23
+ """helper function to spherically interpolate two arrays v1 v2"""
24
+
25
+ if not isinstance(v0, np.ndarray):
26
+ inputs_are_torch = True
27
+ input_device = v0.device
28
+ v0 = v0.cpu().numpy()
29
+ v1 = v1.cpu().numpy()
30
+
31
+ dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
32
+ if np.abs(dot) > DOT_THRESHOLD:
33
+ v2 = (1 - t) * v0 + t * v1
34
+ else:
35
+ theta_0 = np.arccos(dot)
36
+ sin_theta_0 = np.sin(theta_0)
37
+ theta_t = theta_0 * t
38
+ sin_theta_t = np.sin(theta_t)
39
+ s0 = np.sin(theta_0 - theta_t) / sin_theta_0
40
+ s1 = sin_theta_t / sin_theta_0
41
+ v2 = s0 * v0 + s1 * v1
42
+
43
+ if inputs_are_torch:
44
+ v2 = torch.from_numpy(v2).to(input_device)
45
+
46
+ return v2
47
+
48
+
49
+ class StableDiffusionWalkPipeline(DiffusionPipeline):
50
+ r"""
51
+ Pipeline for text-to-image generation using Stable Diffusion.
52
+
53
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
54
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
55
+
56
+ Args:
57
+ vae ([`AutoencoderKL`]):
58
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
59
+ text_encoder ([`CLIPTextModel`]):
60
+ Frozen text-encoder. Stable Diffusion uses the text portion of
61
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
62
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
63
+ tokenizer (`CLIPTokenizer`):
64
+ Tokenizer of class
65
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
66
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
67
+ scheduler ([`SchedulerMixin`]):
68
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
69
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
70
+ safety_checker ([`StableDiffusionSafetyChecker`]):
71
+ Classification module that estimates whether generated images could be considered offensive or harmful.
72
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
73
+ feature_extractor ([`CLIPImageProcessor`]):
74
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
75
+ """
76
+
77
+ def __init__(
78
+ self,
79
+ vae: AutoencoderKL,
80
+ text_encoder: CLIPTextModel,
81
+ tokenizer: CLIPTokenizer,
82
+ unet: UNet2DConditionModel,
83
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
84
+ safety_checker: StableDiffusionSafetyChecker,
85
+ feature_extractor: CLIPImageProcessor,
86
+ ):
87
+ super().__init__()
88
+
89
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
90
+ deprecation_message = (
91
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
92
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
93
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
94
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
95
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
96
+ " file"
97
+ )
98
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
99
+ new_config = dict(scheduler.config)
100
+ new_config["steps_offset"] = 1
101
+ scheduler._internal_dict = FrozenDict(new_config)
102
+
103
+ if safety_checker is None:
104
+ logger.warning(
105
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
106
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
107
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
108
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
109
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
110
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
111
+ )
112
+
113
+ self.register_modules(
114
+ vae=vae,
115
+ text_encoder=text_encoder,
116
+ tokenizer=tokenizer,
117
+ unet=unet,
118
+ scheduler=scheduler,
119
+ safety_checker=safety_checker,
120
+ feature_extractor=feature_extractor,
121
+ )
122
+
123
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
124
+ r"""
125
+ Enable sliced attention computation.
126
+
127
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
128
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
129
+
130
+ Args:
131
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
132
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
133
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
134
+ `attention_head_dim` must be a multiple of `slice_size`.
135
+ """
136
+ if slice_size == "auto":
137
+ # half the attention head size is usually a good trade-off between
138
+ # speed and memory
139
+ slice_size = self.unet.config.attention_head_dim // 2
140
+ self.unet.set_attention_slice(slice_size)
141
+
142
+ def disable_attention_slicing(self):
143
+ r"""
144
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
145
+ back to computing attention in one step.
146
+ """
147
+ # set slice_size = `None` to disable `attention slicing`
148
+ self.enable_attention_slicing(None)
149
+
150
+ @torch.no_grad()
151
+ def __call__(
152
+ self,
153
+ prompt: Optional[Union[str, List[str]]] = None,
154
+ height: int = 512,
155
+ width: int = 512,
156
+ num_inference_steps: int = 50,
157
+ guidance_scale: float = 7.5,
158
+ negative_prompt: Optional[Union[str, List[str]]] = None,
159
+ num_images_per_prompt: Optional[int] = 1,
160
+ eta: float = 0.0,
161
+ generator: Optional[torch.Generator] = None,
162
+ latents: Optional[torch.FloatTensor] = None,
163
+ output_type: Optional[str] = "pil",
164
+ return_dict: bool = True,
165
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
166
+ callback_steps: int = 1,
167
+ text_embeddings: Optional[torch.FloatTensor] = None,
168
+ **kwargs,
169
+ ):
170
+ r"""
171
+ Function invoked when calling the pipeline for generation.
172
+
173
+ Args:
174
+ prompt (`str` or `List[str]`, *optional*, defaults to `None`):
175
+ The prompt or prompts to guide the image generation. If not provided, `text_embeddings` is required.
176
+ height (`int`, *optional*, defaults to 512):
177
+ The height in pixels of the generated image.
178
+ width (`int`, *optional*, defaults to 512):
179
+ The width in pixels of the generated image.
180
+ num_inference_steps (`int`, *optional*, defaults to 50):
181
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
182
+ expense of slower inference.
183
+ guidance_scale (`float`, *optional*, defaults to 7.5):
184
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
185
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
186
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
187
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
188
+ usually at the expense of lower image quality.
189
+ negative_prompt (`str` or `List[str]`, *optional*):
190
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
191
+ if `guidance_scale` is less than `1`).
192
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
193
+ The number of images to generate per prompt.
194
+ eta (`float`, *optional*, defaults to 0.0):
195
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
196
+ [`schedulers.DDIMScheduler`], will be ignored for others.
197
+ generator (`torch.Generator`, *optional*):
198
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
199
+ deterministic.
200
+ latents (`torch.FloatTensor`, *optional*):
201
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
202
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
203
+ tensor will ge generated by sampling using the supplied random `generator`.
204
+ output_type (`str`, *optional*, defaults to `"pil"`):
205
+ The output format of the generate image. Choose between
206
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
207
+ return_dict (`bool`, *optional*, defaults to `True`):
208
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
209
+ plain tuple.
210
+ callback (`Callable`, *optional*):
211
+ A function that will be called every `callback_steps` steps during inference. The function will be
212
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
213
+ callback_steps (`int`, *optional*, defaults to 1):
214
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
215
+ called at every step.
216
+ text_embeddings (`torch.FloatTensor`, *optional*, defaults to `None`):
217
+ Pre-generated text embeddings to be used as inputs for image generation. Can be used in place of
218
+ `prompt` to avoid re-computing the embeddings. If not provided, the embeddings will be generated from
219
+ the supplied `prompt`.
220
+
221
+ Returns:
222
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
223
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
224
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
225
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
226
+ (nsfw) content, according to the `safety_checker`.
227
+ """
228
+
229
+ if height % 8 != 0 or width % 8 != 0:
230
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
231
+
232
+ if (callback_steps is None) or (
233
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
234
+ ):
235
+ raise ValueError(
236
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
237
+ f" {type(callback_steps)}."
238
+ )
239
+
240
+ if text_embeddings is None:
241
+ if isinstance(prompt, str):
242
+ batch_size = 1
243
+ elif isinstance(prompt, list):
244
+ batch_size = len(prompt)
245
+ else:
246
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
247
+
248
+ # get prompt text embeddings
249
+ text_inputs = self.tokenizer(
250
+ prompt,
251
+ padding="max_length",
252
+ max_length=self.tokenizer.model_max_length,
253
+ return_tensors="pt",
254
+ )
255
+ text_input_ids = text_inputs.input_ids
256
+
257
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
258
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
259
+ print(
260
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
261
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
262
+ )
263
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
264
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
265
+ else:
266
+ batch_size = text_embeddings.shape[0]
267
+
268
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
269
+ bs_embed, seq_len, _ = text_embeddings.shape
270
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
271
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
272
+
273
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
274
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
275
+ # corresponds to doing no classifier free guidance.
276
+ do_classifier_free_guidance = guidance_scale > 1.0
277
+ # get unconditional embeddings for classifier free guidance
278
+ if do_classifier_free_guidance:
279
+ uncond_tokens: List[str]
280
+ if negative_prompt is None:
281
+ uncond_tokens = [""] * batch_size
282
+ elif type(prompt) is not type(negative_prompt):
283
+ raise TypeError(
284
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
285
+ f" {type(prompt)}."
286
+ )
287
+ elif isinstance(negative_prompt, str):
288
+ uncond_tokens = [negative_prompt]
289
+ elif batch_size != len(negative_prompt):
290
+ raise ValueError(
291
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
292
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
293
+ " the batch size of `prompt`."
294
+ )
295
+ else:
296
+ uncond_tokens = negative_prompt
297
+
298
+ max_length = self.tokenizer.model_max_length
299
+ uncond_input = self.tokenizer(
300
+ uncond_tokens,
301
+ padding="max_length",
302
+ max_length=max_length,
303
+ truncation=True,
304
+ return_tensors="pt",
305
+ )
306
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
307
+
308
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
309
+ seq_len = uncond_embeddings.shape[1]
310
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
311
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
312
+
313
+ # For classifier free guidance, we need to do two forward passes.
314
+ # Here we concatenate the unconditional and text embeddings into a single batch
315
+ # to avoid doing two forward passes
316
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
317
+
318
+ # get the initial random noise unless the user supplied it
319
+
320
+ # Unlike in other pipelines, latents need to be generated in the target device
321
+ # for 1-to-1 results reproducibility with the CompVis implementation.
322
+ # However this currently doesn't work in `mps`.
323
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
324
+ latents_dtype = text_embeddings.dtype
325
+ if latents is None:
326
+ if self.device.type == "mps":
327
+ # randn does not work reproducibly on mps
328
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
329
+ self.device
330
+ )
331
+ else:
332
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
333
+ else:
334
+ if latents.shape != latents_shape:
335
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
336
+ latents = latents.to(self.device)
337
+
338
+ # set timesteps
339
+ self.scheduler.set_timesteps(num_inference_steps)
340
+
341
+ # Some schedulers like PNDM have timesteps as arrays
342
+ # It's more optimized to move all timesteps to correct device beforehand
343
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
344
+
345
+ # scale the initial noise by the standard deviation required by the scheduler
346
+ latents = latents * self.scheduler.init_noise_sigma
347
+
348
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
349
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
350
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
351
+ # and should be between [0, 1]
352
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
353
+ extra_step_kwargs = {}
354
+ if accepts_eta:
355
+ extra_step_kwargs["eta"] = eta
356
+
357
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
358
+ # expand the latents if we are doing classifier free guidance
359
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
360
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
361
+
362
+ # predict the noise residual
363
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
364
+
365
+ # perform guidance
366
+ if do_classifier_free_guidance:
367
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
368
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
369
+
370
+ # compute the previous noisy sample x_t -> x_t-1
371
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
372
+
373
+ # call the callback, if provided
374
+ if callback is not None and i % callback_steps == 0:
375
+ step_idx = i // getattr(self.scheduler, "order", 1)
376
+ callback(step_idx, t, latents)
377
+
378
+ latents = 1 / 0.18215 * latents
379
+ image = self.vae.decode(latents).sample
380
+
381
+ image = (image / 2 + 0.5).clamp(0, 1)
382
+
383
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
384
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
385
+
386
+ if self.safety_checker is not None:
387
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
388
+ self.device
389
+ )
390
+ image, has_nsfw_concept = self.safety_checker(
391
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
392
+ )
393
+ else:
394
+ has_nsfw_concept = None
395
+
396
+ if output_type == "pil":
397
+ image = self.numpy_to_pil(image)
398
+
399
+ if not return_dict:
400
+ return (image, has_nsfw_concept)
401
+
402
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
403
+
404
+ def embed_text(self, text):
405
+ """takes in text and turns it into text embeddings"""
406
+ text_input = self.tokenizer(
407
+ text,
408
+ padding="max_length",
409
+ max_length=self.tokenizer.model_max_length,
410
+ truncation=True,
411
+ return_tensors="pt",
412
+ )
413
+ with torch.no_grad():
414
+ embed = self.text_encoder(text_input.input_ids.to(self.device))[0]
415
+ return embed
416
+
417
+ def get_noise(self, seed, dtype=torch.float32, height=512, width=512):
418
+ """Takes in random seed and returns corresponding noise vector"""
419
+ return torch.randn(
420
+ (1, self.unet.config.in_channels, height // 8, width // 8),
421
+ generator=torch.Generator(device=self.device).manual_seed(seed),
422
+ device=self.device,
423
+ dtype=dtype,
424
+ )
425
+
426
+ def walk(
427
+ self,
428
+ prompts: List[str],
429
+ seeds: List[int],
430
+ num_interpolation_steps: Optional[int] = 6,
431
+ output_dir: Optional[str] = "./dreams",
432
+ name: Optional[str] = None,
433
+ batch_size: Optional[int] = 1,
434
+ height: Optional[int] = 512,
435
+ width: Optional[int] = 512,
436
+ guidance_scale: Optional[float] = 7.5,
437
+ num_inference_steps: Optional[int] = 50,
438
+ eta: Optional[float] = 0.0,
439
+ ) -> List[str]:
440
+ """
441
+ Walks through a series of prompts and seeds, interpolating between them and saving the results to disk.
442
+
443
+ Args:
444
+ prompts (`List[str]`):
445
+ List of prompts to generate images for.
446
+ seeds (`List[int]`):
447
+ List of seeds corresponding to provided prompts. Must be the same length as prompts.
448
+ num_interpolation_steps (`int`, *optional*, defaults to 6):
449
+ Number of interpolation steps to take between prompts.
450
+ output_dir (`str`, *optional*, defaults to `./dreams`):
451
+ Directory to save the generated images to.
452
+ name (`str`, *optional*, defaults to `None`):
453
+ Subdirectory of `output_dir` to save the generated images to. If `None`, the name will
454
+ be the current time.
455
+ batch_size (`int`, *optional*, defaults to 1):
456
+ Number of images to generate at once.
457
+ height (`int`, *optional*, defaults to 512):
458
+ Height of the generated images.
459
+ width (`int`, *optional*, defaults to 512):
460
+ Width of the generated images.
461
+ guidance_scale (`float`, *optional*, defaults to 7.5):
462
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
463
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
464
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
465
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
466
+ usually at the expense of lower image quality.
467
+ num_inference_steps (`int`, *optional*, defaults to 50):
468
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
469
+ expense of slower inference.
470
+ eta (`float`, *optional*, defaults to 0.0):
471
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
472
+ [`schedulers.DDIMScheduler`], will be ignored for others.
473
+
474
+ Returns:
475
+ `List[str]`: List of paths to the generated images.
476
+ """
477
+ if not len(prompts) == len(seeds):
478
+ raise ValueError(
479
+ f"Number of prompts and seeds must be equalGot {len(prompts)} prompts and {len(seeds)} seeds"
480
+ )
481
+
482
+ name = name or time.strftime("%Y%m%d-%H%M%S")
483
+ save_path = Path(output_dir) / name
484
+ save_path.mkdir(exist_ok=True, parents=True)
485
+
486
+ frame_idx = 0
487
+ frame_filepaths = []
488
+ for prompt_a, prompt_b, seed_a, seed_b in zip(prompts, prompts[1:], seeds, seeds[1:]):
489
+ # Embed Text
490
+ embed_a = self.embed_text(prompt_a)
491
+ embed_b = self.embed_text(prompt_b)
492
+
493
+ # Get Noise
494
+ noise_dtype = embed_a.dtype
495
+ noise_a = self.get_noise(seed_a, noise_dtype, height, width)
496
+ noise_b = self.get_noise(seed_b, noise_dtype, height, width)
497
+
498
+ noise_batch, embeds_batch = None, None
499
+ T = np.linspace(0.0, 1.0, num_interpolation_steps)
500
+ for i, t in enumerate(T):
501
+ noise = slerp(float(t), noise_a, noise_b)
502
+ embed = torch.lerp(embed_a, embed_b, t)
503
+
504
+ noise_batch = noise if noise_batch is None else torch.cat([noise_batch, noise], dim=0)
505
+ embeds_batch = embed if embeds_batch is None else torch.cat([embeds_batch, embed], dim=0)
506
+
507
+ batch_is_ready = embeds_batch.shape[0] == batch_size or i + 1 == T.shape[0]
508
+ if batch_is_ready:
509
+ outputs = self(
510
+ latents=noise_batch,
511
+ text_embeddings=embeds_batch,
512
+ height=height,
513
+ width=width,
514
+ guidance_scale=guidance_scale,
515
+ eta=eta,
516
+ num_inference_steps=num_inference_steps,
517
+ )
518
+ noise_batch, embeds_batch = None, None
519
+
520
+ for image in outputs["images"]:
521
+ frame_filepath = str(save_path / f"frame_{frame_idx:06d}.png")
522
+ image.save(frame_filepath)
523
+ frame_filepaths.append(frame_filepath)
524
+ frame_idx += 1
525
+ return frame_filepaths
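For orientation, a minimal usage sketch of the `walk` method defined above. It assumes this file is loaded by its community-pipeline name `interpolate_stable_diffusion`; the model id, prompts, and seeds are illustrative placeholders, not part of the upstream file.

import torch
from diffusers import DiffusionPipeline

# Load the community pipeline defined in this file (name assumed to match the filename).
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="interpolate_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Interpolate between two prompt/seed pairs and write the frames under ./dreams/.
frame_paths = pipe.walk(
    prompts=["a photo of a forest in spring", "a photo of a forest in winter"],
    seeds=[42, 1337],
    num_interpolation_steps=16,
    batch_size=4,
    num_inference_steps=50,
    guidance_scale=7.5,
)
print(f"wrote {len(frame_paths)} frames")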
v0.24.0/latent_consistency_img2img.py ADDED
@@ -0,0 +1,827 @@
1
+ # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
+ # and https://github.com/hojonathanho/diffusion
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Any, Dict, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import PIL.Image
24
+ import torch
25
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
26
+
27
+ from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
28
+ from diffusers.configuration_utils import register_to_config
29
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
30
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
31
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
32
+ from diffusers.utils import BaseOutput
33
+ from diffusers.utils.torch_utils import randn_tensor
34
+
35
+
36
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
37
+
38
+
39
+ class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline):
40
+ _optional_components = ["scheduler"]
41
+
42
+ def __init__(
43
+ self,
44
+ vae: AutoencoderKL,
45
+ text_encoder: CLIPTextModel,
46
+ tokenizer: CLIPTokenizer,
47
+ unet: UNet2DConditionModel,
48
+ scheduler: "LCMSchedulerWithTimestamp",
49
+ safety_checker: StableDiffusionSafetyChecker,
50
+ feature_extractor: CLIPImageProcessor,
51
+ requires_safety_checker: bool = True,
52
+ ):
53
+ super().__init__()
54
+
55
+ scheduler = (
56
+ scheduler
57
+ if scheduler is not None
58
+ else LCMSchedulerWithTimestamp(
59
+ beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
60
+ )
61
+ )
62
+
63
+ self.register_modules(
64
+ vae=vae,
65
+ text_encoder=text_encoder,
66
+ tokenizer=tokenizer,
67
+ unet=unet,
68
+ scheduler=scheduler,
69
+ safety_checker=safety_checker,
70
+ feature_extractor=feature_extractor,
71
+ )
72
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
73
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
74
+
75
+ def _encode_prompt(
76
+ self,
77
+ prompt,
78
+ device,
79
+ num_images_per_prompt,
80
+ prompt_embeds: None,
81
+ ):
82
+ r"""
83
+ Encodes the prompt into text encoder hidden states.
84
+ Args:
85
+ prompt (`str` or `List[str]`, *optional*):
86
+ prompt to be encoded
87
+ device: (`torch.device`):
88
+ torch device
89
+ num_images_per_prompt (`int`):
90
+ number of images that should be generated per prompt
91
+ prompt_embeds (`torch.FloatTensor`, *optional*):
92
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
93
+ provided, text embeddings will be generated from `prompt` input argument.
94
+ """
95
+
96
+ if prompt is not None and isinstance(prompt, str):
97
+ pass
98
+ elif prompt is not None and isinstance(prompt, list):
99
+ len(prompt)
100
+ else:
101
+ prompt_embeds.shape[0]
102
+
103
+ if prompt_embeds is None:
104
+ text_inputs = self.tokenizer(
105
+ prompt,
106
+ padding="max_length",
107
+ max_length=self.tokenizer.model_max_length,
108
+ truncation=True,
109
+ return_tensors="pt",
110
+ )
111
+ text_input_ids = text_inputs.input_ids
112
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
113
+
114
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
115
+ text_input_ids, untruncated_ids
116
+ ):
117
+ removed_text = self.tokenizer.batch_decode(
118
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
119
+ )
120
+ logger.warning(
121
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
122
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
123
+ )
124
+
125
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
126
+ attention_mask = text_inputs.attention_mask.to(device)
127
+ else:
128
+ attention_mask = None
129
+
130
+ prompt_embeds = self.text_encoder(
131
+ text_input_ids.to(device),
132
+ attention_mask=attention_mask,
133
+ )
134
+ prompt_embeds = prompt_embeds[0]
135
+
136
+ if self.text_encoder is not None:
137
+ prompt_embeds_dtype = self.text_encoder.dtype
138
+ elif self.unet is not None:
139
+ prompt_embeds_dtype = self.unet.dtype
140
+ else:
141
+ prompt_embeds_dtype = prompt_embeds.dtype
142
+
143
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
144
+
145
+ bs_embed, seq_len, _ = prompt_embeds.shape
146
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
147
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
148
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
149
+
150
+ # Don't need to get uncond prompt embedding because of LCM Guided Distillation
151
+ return prompt_embeds
152
+
153
+ def run_safety_checker(self, image, device, dtype):
154
+ if self.safety_checker is None:
155
+ has_nsfw_concept = None
156
+ else:
157
+ if torch.is_tensor(image):
158
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
159
+ else:
160
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
161
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
162
+ image, has_nsfw_concept = self.safety_checker(
163
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
164
+ )
165
+ return image, has_nsfw_concept
166
+
167
+ def prepare_latents(
168
+ self,
169
+ image,
170
+ timestep,
171
+ batch_size,
172
+ num_channels_latents,
173
+ height,
174
+ width,
175
+ dtype,
176
+ device,
177
+ latents=None,
178
+ generator=None,
179
+ ):
180
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
181
+
182
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
183
+ raise ValueError(
184
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
185
+ )
186
+
187
+ image = image.to(device=device, dtype=dtype)
188
+
189
+ # batch_size = batch_size * num_images_per_prompt
190
+
191
+ if image.shape[1] == 4:
192
+ init_latents = image
193
+
194
+ else:
195
+ if isinstance(generator, list) and len(generator) != batch_size:
196
+ raise ValueError(
197
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
198
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
199
+ )
200
+
201
+ elif isinstance(generator, list):
202
+ init_latents = [
203
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
204
+ ]
205
+ init_latents = torch.cat(init_latents, dim=0)
206
+ else:
207
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
208
+
209
+ init_latents = self.vae.config.scaling_factor * init_latents
210
+
211
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
212
+ # expand init_latents for batch_size
213
+ deprecation_message = (
214
+ f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
215
+ " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
216
+ " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
217
+ " your script to pass as many initial images as text prompts to suppress this warning."
218
+ )
219
+ # deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
220
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
221
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
222
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
223
+ raise ValueError(
224
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
225
+ )
226
+ else:
227
+ init_latents = torch.cat([init_latents], dim=0)
228
+
229
+ shape = init_latents.shape
230
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
231
+
232
+ # get latents
233
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
234
+ latents = init_latents
235
+
236
+ return latents
237
+
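+ # NOTE: the code below is unreachable because of the early return above; it mirrors the generic random-latent initialization path.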
238
+ if latents is None:
239
+ latents = torch.randn(shape, dtype=dtype).to(device)
240
+ else:
241
+ latents = latents.to(device)
242
+ # scale the initial noise by the standard deviation required by the scheduler
243
+ latents = latents * self.scheduler.init_noise_sigma
244
+ return latents
245
+
246
+ def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
247
+ """
248
+ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
249
+ Args:
250
+ w: torch.Tensor: guidance-scale values to generate embedding vectors for (scaled by 1000 internally)
251
+ embedding_dim: int: dimension of the embeddings to generate
252
+ dtype: data type of the generated embeddings
253
+ Returns:
254
+ embedding vectors with shape `(len(w), embedding_dim)`
255
+ """
256
+ assert len(w.shape) == 1
257
+ w = w * 1000.0
258
+
259
+ half_dim = embedding_dim // 2
260
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
261
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
262
+ emb = w.to(dtype)[:, None] * emb[None, :]
263
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
264
+ if embedding_dim % 2 == 1: # zero pad
265
+ emb = torch.nn.functional.pad(emb, (0, 1))
266
+ assert emb.shape == (w.shape[0], embedding_dim)
267
+ return emb
268
+
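As a side note, the guidance-scale embedding above can be reproduced in isolation. The sketch below recomputes it outside the pipeline; the guidance value 8.0 and dimension 256 are only examples.

import torch

def w_embedding(w: torch.Tensor, embedding_dim: int = 256) -> torch.Tensor:
    # Sinusoidal features of the (scaled) guidance value, as in get_w_embedding above.
    w = w * 1000.0
    half_dim = embedding_dim // 2
    emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
    emb = w[:, None].float() * emb[None, :]
    return torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)

emb = w_embedding(torch.tensor([8.0]))  # one guidance scale of 8.0
print(emb.shape)                        # torch.Size([1, 256])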
269
+ def get_timesteps(self, num_inference_steps, strength, device):
270
+ # get the original timestep using init_timestep
271
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
272
+
273
+ t_start = max(num_inference_steps - init_timestep, 0)
274
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
275
+
276
+ return timesteps, num_inference_steps - t_start
277
+
278
+ @torch.no_grad()
279
+ def __call__(
280
+ self,
281
+ prompt: Union[str, List[str]] = None,
282
+ image: PipelineImageInput = None,
283
+ strength: float = 0.8,
284
+ height: Optional[int] = 768,
285
+ width: Optional[int] = 768,
286
+ guidance_scale: float = 7.5,
287
+ num_images_per_prompt: Optional[int] = 1,
288
+ latents: Optional[torch.FloatTensor] = None,
289
+ num_inference_steps: int = 4,
290
+ lcm_origin_steps: int = 50,
291
+ prompt_embeds: Optional[torch.FloatTensor] = None,
292
+ output_type: Optional[str] = "pil",
293
+ return_dict: bool = True,
294
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
295
+ ):
296
+ # 0. Default height and width to unet
297
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
298
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
299
+
300
+ # 2. Define call parameters
301
+ if prompt is not None and isinstance(prompt, str):
302
+ batch_size = 1
303
+ elif prompt is not None and isinstance(prompt, list):
304
+ batch_size = len(prompt)
305
+ else:
306
+ batch_size = prompt_embeds.shape[0]
307
+
308
+ device = self._execution_device
309
+ # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
310
+
311
+ # 3. Encode input prompt
312
+ prompt_embeds = self._encode_prompt(
313
+ prompt,
314
+ device,
315
+ num_images_per_prompt,
316
+ prompt_embeds=prompt_embeds,
317
+ )
318
+
319
+ # 3.5 encode image
320
+ image = self.image_processor.preprocess(image)
321
+
322
+ # 4. Prepare timesteps
323
+ self.scheduler.set_timesteps(strength, num_inference_steps, lcm_origin_steps)
324
+ # timesteps = self.scheduler.timesteps
325
+ # timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, 1.0, device)
326
+ timesteps = self.scheduler.timesteps
327
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
328
+
329
+ print("timesteps: ", timesteps)
330
+
331
+ # 5. Prepare latent variable
332
+ num_channels_latents = self.unet.config.in_channels
333
+ latents = self.prepare_latents(
334
+ image,
335
+ latent_timestep,
336
+ batch_size * num_images_per_prompt,
337
+ num_channels_latents,
338
+ height,
339
+ width,
340
+ prompt_embeds.dtype,
341
+ device,
342
+ latents,
343
+ )
344
+ bs = batch_size * num_images_per_prompt
345
+
346
+ # 6. Get Guidance Scale Embedding
347
+ w = torch.tensor(guidance_scale).repeat(bs)
348
+ w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
349
+
350
+ # 7. LCM MultiStep Sampling Loop:
351
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
352
+ for i, t in enumerate(timesteps):
353
+ ts = torch.full((bs,), t, device=device, dtype=torch.long)
354
+ latents = latents.to(prompt_embeds.dtype)
355
+
356
+ # model prediction (v-prediction, eps, x)
357
+ model_pred = self.unet(
358
+ latents,
359
+ ts,
360
+ timestep_cond=w_embedding,
361
+ encoder_hidden_states=prompt_embeds,
362
+ cross_attention_kwargs=cross_attention_kwargs,
363
+ return_dict=False,
364
+ )[0]
365
+
366
+ # compute the previous noisy sample x_t -> x_t-1
367
+ latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
368
+
369
+ # # call the callback, if provided
370
+ # if i == len(timesteps) - 1:
371
+ progress_bar.update()
372
+
373
+ denoised = denoised.to(prompt_embeds.dtype)
374
+ if not output_type == "latent":
375
+ image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
376
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
377
+ else:
378
+ image = denoised
379
+ has_nsfw_concept = None
380
+
381
+ if has_nsfw_concept is None:
382
+ do_denormalize = [True] * image.shape[0]
383
+ else:
384
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
385
+
386
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
387
+
388
+ if not return_dict:
389
+ return (image, has_nsfw_concept)
390
+
391
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
392
+
393
+
394
+ @dataclass
395
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
396
+ class LCMSchedulerOutput(BaseOutput):
397
+ """
398
+ Output class for the scheduler's `step` function output.
399
+ Args:
400
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
401
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
402
+ denoising loop.
403
+ denoised (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
404
+ The predicted fully denoised sample `(x_{0})` based on the model output from the current timestep.
405
+ `denoised` can be used to preview progress or for guidance.
406
+ """
407
+
408
+ prev_sample: torch.FloatTensor
409
+ denoised: Optional[torch.FloatTensor] = None
410
+
411
+
412
+ # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
413
+ def betas_for_alpha_bar(
414
+ num_diffusion_timesteps,
415
+ max_beta=0.999,
416
+ alpha_transform_type="cosine",
417
+ ):
418
+ """
419
+ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
420
+ (1-beta) over time from t = [0,1].
421
+ Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
422
+ to that part of the diffusion process.
423
+ Args:
424
+ num_diffusion_timesteps (`int`): the number of betas to produce.
425
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to
426
+ prevent singularities.
427
+ alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
428
+ Choose from `cosine` or `exp`
429
+ Returns:
430
+ betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
431
+ """
432
+ if alpha_transform_type == "cosine":
433
+
434
+ def alpha_bar_fn(t):
435
+ return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
436
+
437
+ elif alpha_transform_type == "exp":
438
+
439
+ def alpha_bar_fn(t):
440
+ return math.exp(t * -12.0)
441
+
442
+ else:
443
+ raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
444
+
445
+ betas = []
446
+ for i in range(num_diffusion_timesteps):
447
+ t1 = i / num_diffusion_timesteps
448
+ t2 = (i + 1) / num_diffusion_timesteps
449
+ betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
450
+ return torch.tensor(betas, dtype=torch.float32)
451
+
452
+
453
+ def rescale_zero_terminal_snr(betas):
454
+ """
455
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
456
+ Args:
457
+ betas (`torch.FloatTensor`):
458
+ the betas that the scheduler is being initialized with.
459
+ Returns:
460
+ `torch.FloatTensor`: rescaled betas with zero terminal SNR
461
+ """
462
+ # Convert betas to alphas_bar_sqrt
463
+ alphas = 1.0 - betas
464
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
465
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
466
+
467
+ # Store old values.
468
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
469
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
470
+
471
+ # Shift so the last timestep is zero.
472
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
473
+
474
+ # Scale so the first timestep is back to the old value.
475
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
476
+
477
+ # Convert alphas_bar_sqrt to betas
478
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
479
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
480
+ alphas = torch.cat([alphas_bar[0:1], alphas])
481
+ betas = 1 - alphas
482
+
483
+ return betas
484
+
485
+
486
+ class LCMSchedulerWithTimestamp(SchedulerMixin, ConfigMixin):
487
+ """
488
+ This class modifies LCMScheduler to add a timestamp argument to set_timesteps
489
+
490
+
491
+ `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
492
+ non-Markovian guidance.
493
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
494
+ methods the library implements for all schedulers such as loading and saving.
495
+ Args:
496
+ num_train_timesteps (`int`, defaults to 1000):
497
+ The number of diffusion steps to train the model.
498
+ beta_start (`float`, defaults to 0.0001):
499
+ The starting `beta` value of inference.
500
+ beta_end (`float`, defaults to 0.02):
501
+ The final `beta` value.
502
+ beta_schedule (`str`, defaults to `"linear"`):
503
+ The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
504
+ `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
505
+ trained_betas (`np.ndarray`, *optional*):
506
+ Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
507
+ clip_sample (`bool`, defaults to `True`):
508
+ Clip the predicted sample for numerical stability.
509
+ clip_sample_range (`float`, defaults to 1.0):
510
+ The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
511
+ set_alpha_to_one (`bool`, defaults to `True`):
512
+ Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
513
+ there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
514
+ otherwise it uses the alpha value at step 0.
515
+ steps_offset (`int`, defaults to 0):
516
+ An offset added to the inference steps. You can use a combination of `offset=1` and
517
+ `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
518
+ Diffusion.
519
+ prediction_type (`str`, defaults to `epsilon`, *optional*):
520
+ Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
521
+ `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
522
+ Video](https://imagen.research.google/video/paper.pdf) paper).
523
+ thresholding (`bool`, defaults to `False`):
524
+ Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
525
+ as Stable Diffusion.
526
+ dynamic_thresholding_ratio (`float`, defaults to 0.995):
527
+ The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
528
+ sample_max_value (`float`, defaults to 1.0):
529
+ The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
530
+ timestep_spacing (`str`, defaults to `"leading"`):
531
+ The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
532
+ Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
533
+ rescale_betas_zero_snr (`bool`, defaults to `False`):
534
+ Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
535
+ dark samples instead of limiting it to samples with medium brightness. Loosely related to
536
+ [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
537
+ """
538
+
539
+ # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
540
+ order = 1
541
+
542
+ @register_to_config
543
+ def __init__(
544
+ self,
545
+ num_train_timesteps: int = 1000,
546
+ beta_start: float = 0.0001,
547
+ beta_end: float = 0.02,
548
+ beta_schedule: str = "linear",
549
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
550
+ clip_sample: bool = True,
551
+ set_alpha_to_one: bool = True,
552
+ steps_offset: int = 0,
553
+ prediction_type: str = "epsilon",
554
+ thresholding: bool = False,
555
+ dynamic_thresholding_ratio: float = 0.995,
556
+ clip_sample_range: float = 1.0,
557
+ sample_max_value: float = 1.0,
558
+ timestep_spacing: str = "leading",
559
+ rescale_betas_zero_snr: bool = False,
560
+ ):
561
+ if trained_betas is not None:
562
+ self.betas = torch.tensor(trained_betas, dtype=torch.float32)
563
+ elif beta_schedule == "linear":
564
+ self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
565
+ elif beta_schedule == "scaled_linear":
566
+ # this schedule is very specific to the latent diffusion model.
567
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
568
+ elif beta_schedule == "squaredcos_cap_v2":
569
+ # Glide cosine schedule
570
+ self.betas = betas_for_alpha_bar(num_train_timesteps)
571
+ else:
572
+ raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
573
+
574
+ # Rescale for zero SNR
575
+ if rescale_betas_zero_snr:
576
+ self.betas = rescale_zero_terminal_snr(self.betas)
577
+
578
+ self.alphas = 1.0 - self.betas
579
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
580
+
581
+ # At every step in ddim, we are looking into the previous alphas_cumprod
582
+ # For the final step, there is no previous alphas_cumprod because we are already at 0
583
+ # `set_alpha_to_one` decides whether we set this parameter simply to one or
584
+ # whether we use the final alpha of the "non-previous" one.
585
+ self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
586
+
587
+ # standard deviation of the initial noise distribution
588
+ self.init_noise_sigma = 1.0
589
+
590
+ # setable values
591
+ self.num_inference_steps = None
592
+ self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
593
+
594
+ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
595
+ """
596
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
597
+ current timestep.
598
+ Args:
599
+ sample (`torch.FloatTensor`):
600
+ The input sample.
601
+ timestep (`int`, *optional*):
602
+ The current timestep in the diffusion chain.
603
+ Returns:
604
+ `torch.FloatTensor`:
605
+ A scaled input sample.
606
+ """
607
+ return sample
608
+
609
+ def _get_variance(self, timestep, prev_timestep):
610
+ alpha_prod_t = self.alphas_cumprod[timestep]
611
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
612
+ beta_prod_t = 1 - alpha_prod_t
613
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
614
+
615
+ variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
616
+
617
+ return variance
618
+
619
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
620
+ def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
621
+ """
622
+ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
623
+ prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
624
+ s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
625
+ pixels from saturation at each step. We find that dynamic thresholding results in significantly better
626
+ photorealism as well as better image-text alignment, especially when using very large guidance weights."
627
+ https://arxiv.org/abs/2205.11487
628
+ """
629
+ dtype = sample.dtype
630
+ batch_size, channels, height, width = sample.shape
631
+
632
+ if dtype not in (torch.float32, torch.float64):
633
+ sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
634
+
635
+ # Flatten sample for doing quantile calculation along each image
636
+ sample = sample.reshape(batch_size, channels * height * width)
637
+
638
+ abs_sample = sample.abs() # "a certain percentile absolute pixel value"
639
+
640
+ s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
641
+ s = torch.clamp(
642
+ s, min=1, max=self.config.sample_max_value
643
+ ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
644
+
645
+ s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
646
+ sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
647
+
648
+ sample = sample.reshape(batch_size, channels, height, width)
649
+ sample = sample.to(dtype)
650
+
651
+ return sample
652
+
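The dynamic thresholding described in the docstring above can be illustrated on a toy tensor. The sketch below applies the same quantile-clamp-and-rescale steps to made-up values.

import torch

x = torch.tensor([[[[0.2, -3.0, 1.5, 0.9]]]])  # toy x0 prediction, shape (1, 1, 1, 4)
flat = x.reshape(1, -1)
s = torch.quantile(flat.abs(), 0.995, dim=1).clamp(min=1.0).unsqueeze(1)
thresholded = torch.clamp(flat, -s, s) / s      # values now lie in [-1, 1]
print(s, thresholded.reshape_as(x))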
653
+ def set_timesteps(
654
+ self, strength, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None
655
+ ):
656
+ """
657
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
658
+ Args:
659
+ num_inference_steps (`int`):
660
+ The number of diffusion steps used when generating samples with a pre-trained model.
661
+ """
662
+
663
+ if num_inference_steps > self.config.num_train_timesteps:
664
+ raise ValueError(
665
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
666
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
667
+ f" maximal {self.config.num_train_timesteps} timesteps."
668
+ )
669
+
670
+ self.num_inference_steps = num_inference_steps
671
+
672
+ # LCM Timesteps Setting: # Linear Spacing
673
+ c = self.config.num_train_timesteps // lcm_origin_steps
674
+ lcm_origin_timesteps = (
675
+ np.asarray(list(range(1, int(lcm_origin_steps * strength) + 1))) * c - 1
676
+ ) # LCM Training Steps Schedule
677
+ skipping_step = len(lcm_origin_timesteps) // num_inference_steps
678
+ timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
679
+
680
+ self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
681
+
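To make the schedule above concrete, the following standalone sketch reproduces the timestep selection for 1000 training steps, `lcm_origin_steps=50`, `strength=0.8`, and 4 inference steps (all example numbers).

import numpy as np

num_train_timesteps, lcm_origin_steps, strength, num_inference_steps = 1000, 50, 0.8, 4
c = num_train_timesteps // lcm_origin_steps                                               # 20
lcm_origin_timesteps = np.asarray(list(range(1, int(lcm_origin_steps * strength) + 1))) * c - 1
skipping_step = len(lcm_origin_timesteps) // num_inference_steps                          # 10
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]
print(timesteps)  # [799 599 399 199]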
682
+ def get_scalings_for_boundary_condition_discrete(self, t):
683
+ self.sigma_data = 0.5 # Default: 0.5
684
+
685
+ # By dividing 0.1: This is almost a delta function at t=0.
686
+ c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
687
+ c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
688
+ return c_skip, c_out
689
+
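As a quick sanity check of the boundary-condition scalings above: `c_skip` tends to 1 and `c_out` to 0 as the timestep goes to 0, so the parameterization reduces to the identity at t = 0. The timesteps below are only examples.

sigma_data = 0.5
for t in (0, 19, 799):
    c_skip = sigma_data**2 / ((t / 0.1) ** 2 + sigma_data**2)
    c_out = (t / 0.1) / ((t / 0.1) ** 2 + sigma_data**2) ** 0.5
    print(t, c_skip, c_out)
# t=0 gives c_skip=1.0, c_out=0.0; larger t gives c_skip ≈ 0, c_out ≈ 1.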
690
+ def step(
691
+ self,
692
+ model_output: torch.FloatTensor,
693
+ timeindex: int,
694
+ timestep: int,
695
+ sample: torch.FloatTensor,
696
+ eta: float = 0.0,
697
+ use_clipped_model_output: bool = False,
698
+ generator=None,
699
+ variance_noise: Optional[torch.FloatTensor] = None,
700
+ return_dict: bool = True,
701
+ ) -> Union[LCMSchedulerOutput, Tuple]:
702
+ """
703
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
704
+ process from the learned model outputs (most often the predicted noise).
705
+ Args:
706
+ model_output (`torch.FloatTensor`):
707
+ The direct output from learned diffusion model.
708
+ timestep (`float`):
709
+ The current discrete timestep in the diffusion chain.
710
+ sample (`torch.FloatTensor`):
711
+ A current instance of a sample created by the diffusion process.
712
+ eta (`float`):
713
+ The weight of noise for added noise in diffusion step.
714
+ use_clipped_model_output (`bool`, defaults to `False`):
715
+ If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
716
+ because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
717
+ clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
718
+ `use_clipped_model_output` has no effect.
719
+ generator (`torch.Generator`, *optional*):
720
+ A random number generator.
721
+ variance_noise (`torch.FloatTensor`):
722
+ Alternative to generating noise with `generator` by directly providing the noise for the variance
723
+ itself. Useful for methods such as [`CycleDiffusion`].
724
+ return_dict (`bool`, *optional*, defaults to `True`):
725
+ Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
726
+ Returns:
727
+ [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
728
+ If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
729
+ tuple is returned where the first element is the sample tensor.
730
+ """
731
+ if self.num_inference_steps is None:
732
+ raise ValueError(
733
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
734
+ )
735
+
736
+ # 1. get previous step value
737
+ prev_timeindex = timeindex + 1
738
+ if prev_timeindex < len(self.timesteps):
739
+ prev_timestep = self.timesteps[prev_timeindex]
740
+ else:
741
+ prev_timestep = timestep
742
+
743
+ # 2. compute alphas, betas
744
+ alpha_prod_t = self.alphas_cumprod[timestep]
745
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
746
+
747
+ beta_prod_t = 1 - alpha_prod_t
748
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
749
+
750
+ # 3. Get scalings for boundary conditions
751
+ c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
752
+
753
+ # 4. Different Parameterization:
754
+ parameterization = self.config.prediction_type
755
+
756
+ if parameterization == "epsilon": # noise-prediction
757
+ pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
758
+
759
+ elif parameterization == "sample": # x-prediction
760
+ pred_x0 = model_output
761
+
762
+ elif parameterization == "v_prediction": # v-prediction
763
+ pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
764
+
765
+ # 4. Denoise model output using boundary conditions
766
+ denoised = c_out * pred_x0 + c_skip * sample
767
+
768
+ # 5. Sample z ~ N(0, I), For MultiStep Inference
769
+ # Noise is not used for one-step sampling.
770
+ if len(self.timesteps) > 1:
771
+ noise = torch.randn(model_output.shape).to(model_output.device)
772
+ prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
773
+ else:
774
+ prev_sample = denoised
775
+
776
+ if not return_dict:
777
+ return (prev_sample, denoised)
778
+
779
+ return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
780
+
781
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
782
+ def add_noise(
783
+ self,
784
+ original_samples: torch.FloatTensor,
785
+ noise: torch.FloatTensor,
786
+ timesteps: torch.IntTensor,
787
+ ) -> torch.FloatTensor:
788
+ # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
789
+ alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
790
+ timesteps = timesteps.to(original_samples.device)
791
+
792
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
793
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
794
+ while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
795
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
796
+
797
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
798
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
799
+ while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
800
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
801
+
802
+ noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
803
+ return noisy_samples
804
+
805
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
806
+ def get_velocity(
807
+ self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
808
+ ) -> torch.FloatTensor:
809
+ # Make sure alphas_cumprod and timestep have same device and dtype as sample
810
+ alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
811
+ timesteps = timesteps.to(sample.device)
812
+
813
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
814
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
815
+ while len(sqrt_alpha_prod.shape) < len(sample.shape):
816
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
817
+
818
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
819
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
820
+ while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
821
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
822
+
823
+ velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
824
+ return velocity
825
+
826
+ def __len__(self):
827
+ return self.config.num_train_timesteps
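A minimal usage sketch for the image-to-image pipeline defined in this file, assuming it is loaded by its community-pipeline name `latent_consistency_img2img`; the image URL and prompt are placeholders.

import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

pipe = DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    custom_pipeline="latent_consistency_img2img",
)
pipe.to(torch_device="cuda", torch_dtype=torch.float32)

init_image = load_image("https://example.com/input.png")  # placeholder URL
result = pipe(
    prompt="a sunset over snowy mountains, oil painting",
    image=init_image,
    strength=0.5,
    num_inference_steps=4,
    lcm_origin_steps=50,
    guidance_scale=8.0,
)
result.images[0].save("lcm_img2img.png")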
v0.24.0/latent_consistency_interpolate.py ADDED
@@ -0,0 +1,1051 @@
1
+ import inspect
2
+ from typing import Any, Callable, Dict, List, Optional, Union
3
+
4
+ import numpy as np
5
+ import torch
6
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
7
+
8
+ from diffusers.image_processor import VaeImageProcessor
9
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
10
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
11
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
12
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
14
+ from diffusers.schedulers import LCMScheduler
15
+ from diffusers.utils import (
16
+ USE_PEFT_BACKEND,
17
+ deprecate,
18
+ logging,
19
+ replace_example_docstring,
20
+ scale_lora_layers,
21
+ unscale_lora_layers,
22
+ )
23
+ from diffusers.utils.torch_utils import randn_tensor
24
+
25
+
26
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
27
+
28
+ EXAMPLE_DOC_STRING = """
29
+ Examples:
30
+ ```py
31
+ >>> import torch
32
+ >>> import numpy as np
33
+
34
+ >>> from diffusers import DiffusionPipeline
35
+
36
+ >>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_interpolate")
37
+ >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality.
38
+ >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32)
39
+
40
+ >>> prompts = ["A cat", "A dog", "A horse"]
41
+ >>> num_inference_steps = 4
42
+ >>> num_interpolation_steps = 24
43
+ >>> seed = 1337
44
+
45
+ >>> torch.manual_seed(seed)
46
+ >>> np.random.seed(seed)
47
+
48
+ >>> images = pipe(
49
+ prompt=prompts,
50
+ height=512,
51
+ width=512,
52
+ num_inference_steps=num_inference_steps,
53
+ num_interpolation_steps=num_interpolation_steps,
54
+ guidance_scale=8.0,
55
+ embedding_interpolation_type="lerp",
56
+ latent_interpolation_type="slerp",
57
+ process_batch_size=4, # Make it higher or lower based on your GPU memory
58
+ generator=torch.Generator().manual_seed(seed),
59
+ )
60
+
61
+ >>> # Save the images as a video
62
+ >>> import imageio
63
+ >>> from PIL import Image
64
+
65
+ >>> def pil_to_video(images: List[Image.Image], filename: str, fps: int = 60) -> None:
66
+ frames = [np.array(image) for image in images]
67
+ with imageio.get_writer(filename, fps=fps) as video_writer:
68
+ for frame in frames:
69
+ video_writer.append_data(frame)
70
+
71
+ >>> pil_to_video(images, "lcm_interpolate.mp4", fps=24)
72
+ ```
73
+ """
74
+
75
+
76
+ def lerp(
77
+ v0: Union[torch.Tensor, np.ndarray],
78
+ v1: Union[torch.Tensor, np.ndarray],
79
+ t: Union[float, torch.Tensor, np.ndarray],
80
+ ) -> Union[torch.Tensor, np.ndarray]:
81
+ """
82
+ Linearly interpolate between two vectors/tensors.
83
+
84
+ Args:
85
+ v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor.
86
+ v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor.
87
+ t: (`float`, `torch.Tensor`, or `np.ndarray`):
88
+ Interpolation factor. If float, must be between 0 and 1. If np.ndarray or
89
+ torch.Tensor, must be one dimensional with values between 0 and 1.
90
+
91
+ Returns:
92
+ Union[torch.Tensor, np.ndarray]
93
+ Interpolated vector/tensor between v0 and v1.
94
+ """
95
+ inputs_are_torch = False
96
+ t_is_float = False
97
+
98
+ if isinstance(v0, torch.Tensor):
99
+ inputs_are_torch = True
100
+ input_device = v0.device
101
+ v0 = v0.cpu().numpy()
102
+ v1 = v1.cpu().numpy()
103
+
104
+ if isinstance(t, torch.Tensor):
105
+ inputs_are_torch = True
106
+ input_device = t.device
107
+ t = t.cpu().numpy()
108
+ elif isinstance(t, float):
109
+ t_is_float = True
110
+ t = np.array([t])
111
+
112
+ t = t[..., None]
113
+ v0 = v0[None, ...]
114
+ v1 = v1[None, ...]
115
+ v2 = (1 - t) * v0 + t * v1
116
+
117
+ if t_is_float and v0.ndim > 1:
118
+ assert v2.shape[0] == 1
119
+ v2 = np.squeeze(v2, axis=0)
120
+ if inputs_are_torch:
121
+ v2 = torch.from_numpy(v2).to(input_device)
122
+
123
+ return v2
124
+
125
+
126
+ def slerp(
127
+ v0: Union[torch.Tensor, np.ndarray],
128
+ v1: Union[torch.Tensor, np.ndarray],
129
+ t: Union[float, torch.Tensor, np.ndarray],
130
+ DOT_THRESHOLD=0.9995,
131
+ ) -> Union[torch.Tensor, np.ndarray]:
132
+ """
133
+ Spherical linear interpolation between two vectors/tensors.
134
+
135
+ Args:
136
+ v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor.
137
+ v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor.
138
+ t: (`float`, `torch.Tensor`, or `np.ndarray`):
139
+ Interpolation factor. If float, must be between 0 and 1. If np.ndarray or
140
+ torch.Tensor, must be one dimensional with values between 0 and 1.
141
+ DOT_THRESHOLD (`float`, *optional*, default=0.9995):
142
+ Threshold for when to use linear interpolation instead of spherical interpolation.
143
+
144
+ Returns:
145
+ `torch.Tensor` or `np.ndarray`:
146
+ Interpolated vector/tensor between v0 and v1.
147
+ """
148
+ inputs_are_torch = False
149
+ t_is_float = False
150
+
151
+ if isinstance(v0, torch.Tensor):
152
+ inputs_are_torch = True
153
+ input_device = v0.device
154
+ v0 = v0.cpu().numpy()
155
+ v1 = v1.cpu().numpy()
156
+
157
+ if isinstance(t, torch.Tensor):
158
+ inputs_are_torch = True
159
+ input_device = t.device
160
+ t = t.cpu().numpy()
161
+ elif isinstance(t, float):
162
+ t_is_float = True
163
+ t = np.array([t], dtype=v0.dtype)
164
+
165
+ dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
166
+ if np.abs(dot) > DOT_THRESHOLD:
167
+ # v1 and v2 are close to parallel
168
+ # Use linear interpolation instead
169
+ v2 = lerp(v0, v1, t)
170
+ else:
171
+ theta_0 = np.arccos(dot)
172
+ sin_theta_0 = np.sin(theta_0)
173
+ theta_t = theta_0 * t
174
+ sin_theta_t = np.sin(theta_t)
175
+ s0 = np.sin(theta_0 - theta_t) / sin_theta_0
176
+ s1 = sin_theta_t / sin_theta_0
177
+ s0 = s0[..., None]
178
+ s1 = s1[..., None]
179
+ v0 = v0[None, ...]
180
+ v1 = v1[None, ...]
181
+ v2 = s0 * v0 + s1 * v1
182
+
183
+ if t_is_float and v0.ndim > 1:
184
+ assert v2.shape[0] == 1
185
+ v2 = np.squeeze(v2, axis=0)
186
+ if inputs_are_torch:
187
+ v2 = torch.from_numpy(v2).to(input_device)
188
+
189
+ return v2
190
+
191
+
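To see why `slerp` is suggested for latents while `lerp` is suggested for embeddings in the example above, the standalone check below compares the two at the midpoint of two orthogonal unit vectors: linear interpolation shrinks the norm, spherical interpolation preserves it (values are illustrative).

import numpy as np

v0, v1, t = np.array([1.0, 0.0]), np.array([0.0, 1.0]), 0.5

linear = (1 - t) * v0 + t * v1
theta = np.arccos(np.clip(np.dot(v0, v1), -1.0, 1.0))
spherical = (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)

print(np.linalg.norm(linear))     # ≈ 0.707
print(np.linalg.norm(spherical))  # ≈ 1.0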
192
+ class LatentConsistencyModelWalkPipeline(
193
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
194
+ ):
195
+ r"""
196
+ Pipeline for text-to-image generation using a latent consistency model.
197
+
198
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
199
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
200
+
201
+ The pipeline also inherits the following loading methods:
202
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
203
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
204
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
205
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
206
+
207
+ Args:
208
+ vae ([`AutoencoderKL`]):
209
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
210
+ text_encoder ([`~transformers.CLIPTextModel`]):
211
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
212
+ tokenizer ([`~transformers.CLIPTokenizer`]):
213
+ A `CLIPTokenizer` to tokenize text.
214
+ unet ([`UNet2DConditionModel`]):
215
+ A `UNet2DConditionModel` to denoise the encoded image latents.
216
+ scheduler ([`SchedulerMixin`]):
217
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only
218
+ supports [`LCMScheduler`].
219
+ safety_checker ([`StableDiffusionSafetyChecker`]):
220
+ Classification module that estimates whether generated images could be considered offensive or harmful.
221
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
222
+ about a model's potential harms.
223
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
224
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
225
+ requires_safety_checker (`bool`, *optional*, defaults to `True`):
226
+ Whether the pipeline requires a safety checker component.
227
+ """
228
+
229
+ model_cpu_offload_seq = "text_encoder->unet->vae"
230
+ _optional_components = ["safety_checker", "feature_extractor"]
231
+ _exclude_from_cpu_offload = ["safety_checker"]
232
+ _callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"]
233
+
234
+ def __init__(
235
+ self,
236
+ vae: AutoencoderKL,
237
+ text_encoder: CLIPTextModel,
238
+ tokenizer: CLIPTokenizer,
239
+ unet: UNet2DConditionModel,
240
+ scheduler: LCMScheduler,
241
+ safety_checker: StableDiffusionSafetyChecker,
242
+ feature_extractor: CLIPImageProcessor,
243
+ requires_safety_checker: bool = True,
244
+ ):
245
+ super().__init__()
246
+
247
+ if safety_checker is None and requires_safety_checker:
248
+ logger.warning(
249
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
250
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
251
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
252
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
253
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
254
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
255
+ )
256
+
257
+ if safety_checker is not None and feature_extractor is None:
258
+ raise ValueError(
259
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
260
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
261
+ )
262
+
263
+ self.register_modules(
264
+ vae=vae,
265
+ text_encoder=text_encoder,
266
+ tokenizer=tokenizer,
267
+ unet=unet,
268
+ scheduler=scheduler,
269
+ safety_checker=safety_checker,
270
+ feature_extractor=feature_extractor,
271
+ )
272
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
273
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
274
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
275
+
276
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
277
+ def enable_vae_slicing(self):
278
+ r"""
279
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
280
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
281
+ """
282
+ self.vae.enable_slicing()
283
+
284
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
285
+ def disable_vae_slicing(self):
286
+ r"""
287
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
288
+ computing decoding in one step.
289
+ """
290
+ self.vae.disable_slicing()
291
+
292
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
293
+ def enable_vae_tiling(self):
294
+ r"""
295
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
296
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
297
+ processing larger images.
298
+ """
299
+ self.vae.enable_tiling()
300
+
301
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
302
+ def disable_vae_tiling(self):
303
+ r"""
304
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
305
+ computing decoding in one step.
306
+ """
307
+ self.vae.disable_tiling()
308
+
309
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
310
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
311
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
312
+
313
+ The suffixes after the scaling factors represent the stages where they are being applied.
314
+
315
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
316
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
317
+
318
+ Args:
319
+ s1 (`float`):
320
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
321
+ mitigate "oversmoothing effect" in the enhanced denoising process.
322
+ s2 (`float`):
323
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
324
+ mitigate "oversmoothing effect" in the enhanced denoising process.
325
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
326
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
327
+ """
328
+ if not hasattr(self, "unet"):
329
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
330
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
331
+
332
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
333
+ def disable_freeu(self):
334
+ """Disables the FreeU mechanism if enabled."""
335
+ self.unet.disable_freeu()
336
+
337
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
338
+ def encode_prompt(
339
+ self,
340
+ prompt,
341
+ device,
342
+ num_images_per_prompt,
343
+ do_classifier_free_guidance,
344
+ negative_prompt=None,
345
+ prompt_embeds: Optional[torch.FloatTensor] = None,
346
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
347
+ lora_scale: Optional[float] = None,
348
+ clip_skip: Optional[int] = None,
349
+ ):
350
+ r"""
351
+ Encodes the prompt into text encoder hidden states.
352
+
353
+ Args:
354
+ prompt (`str` or `List[str]`, *optional*):
355
+ prompt to be encoded
356
+ device: (`torch.device`):
357
+ torch device
358
+ num_images_per_prompt (`int`):
359
+ number of images that should be generated per prompt
360
+ do_classifier_free_guidance (`bool`):
361
+ whether to use classifier free guidance or not
362
+ negative_prompt (`str` or `List[str]`, *optional*):
363
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
364
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
365
+ less than `1`).
366
+ prompt_embeds (`torch.FloatTensor`, *optional*):
367
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
368
+ provided, text embeddings will be generated from `prompt` input argument.
369
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
370
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
371
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
372
+ argument.
373
+ lora_scale (`float`, *optional*):
374
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
375
+ clip_skip (`int`, *optional*):
376
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
377
+ the output of the pre-final layer will be used for computing the prompt embeddings.
378
+ """
379
+ # set lora scale so that monkey patched LoRA
380
+ # function of text encoder can correctly access it
381
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
382
+ self._lora_scale = lora_scale
383
+
384
+ # dynamically adjust the LoRA scale
385
+ if not USE_PEFT_BACKEND:
386
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
387
+ else:
388
+ scale_lora_layers(self.text_encoder, lora_scale)
389
+
390
+ if prompt is not None and isinstance(prompt, str):
391
+ batch_size = 1
392
+ elif prompt is not None and isinstance(prompt, list):
393
+ batch_size = len(prompt)
394
+ else:
395
+ batch_size = prompt_embeds.shape[0]
396
+
397
+ if prompt_embeds is None:
398
+ # textual inversion: process multi-vector tokens if necessary
399
+ if isinstance(self, TextualInversionLoaderMixin):
400
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
401
+
402
+ text_inputs = self.tokenizer(
403
+ prompt,
404
+ padding="max_length",
405
+ max_length=self.tokenizer.model_max_length,
406
+ truncation=True,
407
+ return_tensors="pt",
408
+ )
409
+ text_input_ids = text_inputs.input_ids
410
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
411
+
412
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
413
+ text_input_ids, untruncated_ids
414
+ ):
415
+ removed_text = self.tokenizer.batch_decode(
416
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
417
+ )
418
+ logger.warning(
419
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
420
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
421
+ )
422
+
423
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
424
+ attention_mask = text_inputs.attention_mask.to(device)
425
+ else:
426
+ attention_mask = None
427
+
428
+ if clip_skip is None:
429
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
430
+ prompt_embeds = prompt_embeds[0]
431
+ else:
432
+ prompt_embeds = self.text_encoder(
433
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
434
+ )
435
+ # Access the `hidden_states` first, that contains a tuple of
436
+ # all the hidden states from the encoder layers. Then index into
437
+ # the tuple to access the hidden states from the desired layer.
438
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
439
+ # We also need to apply the final LayerNorm here to not mess with the
440
+ # representations. The `last_hidden_states` that we typically use for
441
+ # obtaining the final prompt representations passes through the LayerNorm
442
+ # layer.
443
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
444
+
445
+ if self.text_encoder is not None:
446
+ prompt_embeds_dtype = self.text_encoder.dtype
447
+ elif self.unet is not None:
448
+ prompt_embeds_dtype = self.unet.dtype
449
+ else:
450
+ prompt_embeds_dtype = prompt_embeds.dtype
451
+
452
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
453
+
454
+ bs_embed, seq_len, _ = prompt_embeds.shape
455
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
456
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
457
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
458
+
459
+ # get unconditional embeddings for classifier free guidance
460
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
461
+ uncond_tokens: List[str]
462
+ if negative_prompt is None:
463
+ uncond_tokens = [""] * batch_size
464
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
465
+ raise TypeError(
466
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
467
+ f" {type(prompt)}."
468
+ )
469
+ elif isinstance(negative_prompt, str):
470
+ uncond_tokens = [negative_prompt]
471
+ elif batch_size != len(negative_prompt):
472
+ raise ValueError(
473
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
474
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
475
+ " the batch size of `prompt`."
476
+ )
477
+ else:
478
+ uncond_tokens = negative_prompt
479
+
480
+ # textual inversion: process multi-vector tokens if necessary
481
+ if isinstance(self, TextualInversionLoaderMixin):
482
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
483
+
484
+ max_length = prompt_embeds.shape[1]
485
+ uncond_input = self.tokenizer(
486
+ uncond_tokens,
487
+ padding="max_length",
488
+ max_length=max_length,
489
+ truncation=True,
490
+ return_tensors="pt",
491
+ )
492
+
493
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
494
+ attention_mask = uncond_input.attention_mask.to(device)
495
+ else:
496
+ attention_mask = None
497
+
498
+ negative_prompt_embeds = self.text_encoder(
499
+ uncond_input.input_ids.to(device),
500
+ attention_mask=attention_mask,
501
+ )
502
+ negative_prompt_embeds = negative_prompt_embeds[0]
503
+
504
+ if do_classifier_free_guidance:
505
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
506
+ seq_len = negative_prompt_embeds.shape[1]
507
+
508
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
509
+
510
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
511
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
512
+
513
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
514
+ # Retrieve the original scale by scaling back the LoRA layers
515
+ unscale_lora_layers(self.text_encoder, lora_scale)
516
+
517
+ return prompt_embeds, negative_prompt_embeds
518
+
519
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
520
+ def run_safety_checker(self, image, device, dtype):
521
+ if self.safety_checker is None:
522
+ has_nsfw_concept = None
523
+ else:
524
+ if torch.is_tensor(image):
525
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
526
+ else:
527
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
528
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
529
+ image, has_nsfw_concept = self.safety_checker(
530
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
531
+ )
532
+ return image, has_nsfw_concept
533
+
534
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
535
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
536
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
537
+ if isinstance(generator, list) and len(generator) != batch_size:
538
+ raise ValueError(
539
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
540
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
541
+ )
542
+
543
+ if latents is None:
544
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
545
+ else:
546
+ latents = latents.to(device)
547
+
548
+ # scale the initial noise by the standard deviation required by the scheduler
549
+ latents = latents * self.scheduler.init_noise_sigma
550
+ return latents
551
+
552
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
553
+ """
554
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
555
+
556
+ Args:
557
+ w (`torch.Tensor`):
558
+ generate embedding vectors for these guidance scale values
559
+ embedding_dim (`int`, *optional*, defaults to 512):
560
+ dimension of the embeddings to generate
561
+ dtype:
562
+ data type of the generated embeddings
563
+
564
+ Returns:
565
+ `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
566
+ """
567
+ assert len(w.shape) == 1
568
+ w = w * 1000.0
569
+
570
+ half_dim = embedding_dim // 2
571
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
572
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
573
+ emb = w.to(dtype)[:, None] * emb[None, :]
574
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
575
+ if embedding_dim % 2 == 1: # zero pad
576
+ emb = torch.nn.functional.pad(emb, (0, 1))
577
+ assert emb.shape == (w.shape[0], embedding_dim)
578
+ return emb
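# Worked sketch of the computation above (values are illustrative): for w = guidance_scale - 1 = 7.5
# and embedding_dim = 256 this is a plain sinusoidal embedding of w * 1000:
#   half_dim = 128
#   freqs = torch.exp(torch.arange(half_dim) * -(torch.log(torch.tensor(10000.0)) / (half_dim - 1)))
#   emb = torch.cat([torch.sin(7500.0 * freqs), torch.cos(7500.0 * freqs)])[None, :]  # shape (1, 256)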
579
+
580
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
581
+ def prepare_extra_step_kwargs(self, generator, eta):
582
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
583
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
584
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
585
+ # and should be between [0, 1]
586
+
587
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
588
+ extra_step_kwargs = {}
589
+ if accepts_eta:
590
+ extra_step_kwargs["eta"] = eta
591
+
592
+ # check if the scheduler accepts generator
593
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
594
+ if accepts_generator:
595
+ extra_step_kwargs["generator"] = generator
596
+ return extra_step_kwargs
597
+
598
+ # Currently StableDiffusionPipeline.check_inputs with negative prompt stuff removed
599
+ def check_inputs(
600
+ self,
601
+ prompt: Union[str, List[str]],
602
+ height: int,
603
+ width: int,
604
+ callback_steps: int,
605
+ prompt_embeds: Optional[torch.FloatTensor] = None,
606
+ callback_on_step_end_tensor_inputs=None,
607
+ ):
608
+ if height % 8 != 0 or width % 8 != 0:
609
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
610
+
611
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
612
+ raise ValueError(
613
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
614
+ f" {type(callback_steps)}."
615
+ )
616
+
617
+ if callback_on_step_end_tensor_inputs is not None and not all(
618
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
619
+ ):
620
+ raise ValueError(
621
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
622
+ )
623
+
624
+ if prompt is not None and prompt_embeds is not None:
625
+ raise ValueError(
626
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
627
+ " only forward one of the two."
628
+ )
629
+ elif prompt is None and prompt_embeds is None:
630
+ raise ValueError(
631
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
632
+ )
633
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
634
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
635
+
636
+ @torch.no_grad()
637
+ def interpolate_embedding(
638
+ self,
639
+ start_embedding: torch.FloatTensor,
640
+ end_embedding: torch.FloatTensor,
641
+ num_interpolation_steps: Union[int, List[int]],
642
+ interpolation_type: str,
643
+ ) -> torch.FloatTensor:
644
+ if interpolation_type == "lerp":
645
+ interpolation_fn = lerp
646
+ elif interpolation_type == "slerp":
647
+ interpolation_fn = slerp
648
+ else:
649
+ raise ValueError(
650
+ f"embedding_interpolation_type must be one of ['lerp', 'slerp'], got {interpolation_type}."
651
+ )
652
+
653
+ embedding = torch.cat([start_embedding, end_embedding])
654
+ steps = torch.linspace(0, 1, num_interpolation_steps, dtype=embedding.dtype).cpu().numpy()
655
+ steps = np.expand_dims(steps, axis=tuple(range(1, embedding.ndim)))
656
+ interpolations = []
657
+
658
+ # Interpolate between text embeddings
659
+ # TODO(aryan): Think of a better way of doing this
660
+ # See if it can be done in parallel instead
661
+ for i in range(embedding.shape[0] - 1):
662
+ interpolations.append(interpolation_fn(embedding[i], embedding[i + 1], steps).squeeze(dim=1))
663
+
664
+ interpolations = torch.cat(interpolations)
665
+ return interpolations
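# Minimal sketch of the linear ("lerp") case used above; the `lerp`/`slerp` helpers are defined
# earlier in this file, and the shapes below are illustrative assumptions (CLIP text embeddings):
#   start, end = prompt_embeds_1[0], prompt_embeds_2[0]    # each of shape (77, 768)
#   steps = torch.linspace(0, 1, 8)[:, None, None]         # one interpolation weight per frame
#   walk = (1 - steps) * start + steps * end               # (8, 77, 768) interpolated embeddings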
666
+
667
+ @torch.no_grad()
668
+ def interpolate_latent(
669
+ self,
670
+ start_latent: torch.FloatTensor,
671
+ end_latent: torch.FloatTensor,
672
+ num_interpolation_steps: Union[int, List[int]],
673
+ interpolation_type: str,
674
+ ) -> torch.FloatTensor:
675
+ if interpolation_type == "lerp":
676
+ interpolation_fn = lerp
677
+ elif interpolation_type == "slerp":
678
+ interpolation_fn = slerp
679
+
680
+ latent = torch.cat([start_latent, end_latent])
681
+ steps = torch.linspace(0, 1, num_interpolation_steps, dtype=latent.dtype).cpu().numpy()
682
+ steps = np.expand_dims(steps, axis=tuple(range(1, latent.ndim)))
683
+ interpolations = []
684
+
685
+ # Interpolate between latents
686
+ # TODO: Think of a better way of doing this
687
+ # See if it can be done in parallel instead
688
+ for i in range(latent.shape[0] - 1):
689
+ interpolations.append(interpolation_fn(latent[i], latent[i + 1], steps).squeeze(dim=1))
690
+
691
+ return torch.cat(interpolations)
692
+
693
+ @property
694
+ def guidance_scale(self):
695
+ return self._guidance_scale
696
+
697
+ @property
698
+ def cross_attention_kwargs(self):
699
+ return self._cross_attention_kwargs
700
+
701
+ @property
702
+ def clip_skip(self):
703
+ return self._clip_skip
704
+
705
+ @property
706
+ def num_timesteps(self):
707
+ return self._num_timesteps
708
+
709
+ @torch.no_grad()
710
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
711
+ def __call__(
712
+ self,
713
+ prompt: Union[str, List[str]] = None,
714
+ height: Optional[int] = None,
715
+ width: Optional[int] = None,
716
+ num_inference_steps: int = 4,
717
+ num_interpolation_steps: int = 8,
718
+ original_inference_steps: int = None,
719
+ guidance_scale: float = 8.5,
720
+ num_images_per_prompt: Optional[int] = 1,
721
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
722
+ latents: Optional[torch.FloatTensor] = None,
723
+ prompt_embeds: Optional[torch.FloatTensor] = None,
724
+ output_type: Optional[str] = "pil",
725
+ return_dict: bool = True,
726
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
727
+ clip_skip: Optional[int] = None,
728
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
729
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
730
+ embedding_interpolation_type: str = "lerp",
731
+ latent_interpolation_type: str = "slerp",
732
+ process_batch_size: int = 4,
733
+ **kwargs,
734
+ ):
735
+ r"""
736
+ The call function to the pipeline for generation.
737
+
738
+ Args:
739
+ prompt (`str` or `List[str]`, *optional*):
740
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
741
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
742
+ The height in pixels of the generated image.
743
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
744
+ The width in pixels of the generated image.
745
+ num_inference_steps (`int`, *optional*, defaults to 4):
746
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
747
+ expense of slower inference.
748
+ original_inference_steps (`int`, *optional*):
749
+ The original number of inference steps used to generate a linearly-spaced timestep schedule, from which
750
+ we will draw `num_inference_steps` evenly spaced timesteps as our final timestep schedule,
751
+ following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the
752
+ scheduler's `original_inference_steps` attribute.
753
+ guidance_scale (`float`, *optional*, defaults to 8.5):
754
+ A higher guidance scale value encourages the model to generate images closely linked to the text
755
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
756
+ Note that the original latent consistency models paper uses a different CFG formulation where the
757
+ guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale >
758
+ 0`).
759
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
760
+ The number of images to generate per prompt.
761
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
762
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
763
+ generation deterministic.
764
+ latents (`torch.FloatTensor`, *optional*):
765
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
766
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
767
+ tensor is generated by sampling using the supplied random `generator`.
768
+ prompt_embeds (`torch.FloatTensor`, *optional*):
769
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
770
+ provided, text embeddings are generated from the `prompt` input argument.
771
+ output_type (`str`, *optional*, defaults to `"pil"`):
772
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
773
+ return_dict (`bool`, *optional*, defaults to `True`):
774
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
775
+ plain tuple.
776
+ cross_attention_kwargs (`dict`, *optional*):
777
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
778
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
779
+ clip_skip (`int`, *optional*):
780
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
781
+ the output of the pre-final layer will be used for computing the prompt embeddings.
782
+ callback_on_step_end (`Callable`, *optional*):
783
+ A function that is called at the end of each denoising step during inference. The function is called
784
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
785
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
786
+ `callback_on_step_end_tensor_inputs`.
787
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
788
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
789
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
790
+ `._callback_tensor_inputs` attribute of your pipeline class.
791
+ embedding_interpolation_type (`str`, *optional*, defaults to `"lerp"`):
792
+ The type of interpolation to use for interpolating between text embeddings. Choose between `"lerp"` and `"slerp"`.
793
+ latent_interpolation_type (`str`, *optional*, defaults to `"slerp"`):
794
+ The type of interpolation to use for interpolating between latents. Choose between `"lerp"` and `"slerp"`.
795
+ process_batch_size (`int`, *optional*, defaults to 4):
796
+ The batch size to use for processing the images. This is useful when generating a large number of images
797
+ and you want to avoid running out of memory.
798
+
799
+ Examples:
800
+
801
+ Returns:
802
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
803
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
804
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
805
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
806
+ "not-safe-for-work" (nsfw) content.
807
+ """
808
+
809
+ callback = kwargs.pop("callback", None)
810
+ callback_steps = kwargs.pop("callback_steps", None)
811
+
812
+ if callback is not None:
813
+ deprecate(
814
+ "callback",
815
+ "1.0.0",
816
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
817
+ )
818
+ if callback_steps is not None:
819
+ deprecate(
820
+ "callback_steps",
821
+ "1.0.0",
822
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
823
+ )
824
+
825
+ # 0. Default height and width to unet
826
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
827
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
828
+
829
+ # 1. Check inputs. Raise error if not correct
830
+ self.check_inputs(prompt, height, width, callback_steps, prompt_embeds, callback_on_step_end_tensor_inputs)
831
+ self._guidance_scale = guidance_scale
832
+ self._clip_skip = clip_skip
833
+ self._cross_attention_kwargs = cross_attention_kwargs
834
+
835
+ # 2. Define call parameters
836
+ if prompt is not None and isinstance(prompt, str):
837
+ batch_size = 1
838
+ elif prompt is not None and isinstance(prompt, list):
839
+ batch_size = len(prompt)
840
+ else:
841
+ batch_size = prompt_embeds.shape[0]
842
+ if batch_size < 2:
843
+ raise ValueError(f"`prompt` must have length of atleast 2 but found {batch_size}")
844
+ if num_images_per_prompt != 1:
845
+ raise ValueError("`num_images_per_prompt` must be `1` as no other value is supported yet")
846
+ if prompt_embeds is not None:
847
+ raise ValueError("`prompt_embeds` must be None since it is not supported yet")
848
+ if latents is not None:
849
+ raise ValueError("`latents` must be None since it is not supported yet")
850
+
851
+ device = self._execution_device
852
+ # do_classifier_free_guidance = guidance_scale > 1.0
853
+
854
+ lora_scale = (
855
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
856
+ )
857
+
858
+ self.scheduler.set_timesteps(num_inference_steps, device, original_inference_steps=original_inference_steps)
859
+ timesteps = self.scheduler.timesteps
860
+ num_channels_latents = self.unet.config.in_channels
861
+ # bs = batch_size * num_images_per_prompt
862
+
863
+ # 3. Encode initial input prompt
864
+ prompt_embeds_1, _ = self.encode_prompt(
865
+ prompt[:1],
866
+ device,
867
+ num_images_per_prompt=num_images_per_prompt,
868
+ do_classifier_free_guidance=False,
869
+ negative_prompt=None,
870
+ prompt_embeds=prompt_embeds,
871
+ negative_prompt_embeds=None,
872
+ lora_scale=lora_scale,
873
+ clip_skip=self.clip_skip,
874
+ )
875
+
876
+ # 4. Prepare initial latent variables
877
+ latents_1 = self.prepare_latents(
878
+ 1,
879
+ num_channels_latents,
880
+ height,
881
+ width,
882
+ prompt_embeds_1.dtype,
883
+ device,
884
+ generator,
885
+ latents,
886
+ )
887
+
888
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None)
889
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
890
+ self._num_timesteps = len(timesteps)
891
+ images = []
892
+
893
+ # 5. Iterate over prompts and perform latent walk. Note that we do this two prompts at a time
894
+ # otherwise the memory usage ends up being too high.
895
+ with self.progress_bar(total=batch_size - 1) as prompt_progress_bar:
896
+ for i in range(1, batch_size):
897
+ # 6. Encode current prompt
898
+ prompt_embeds_2, _ = self.encode_prompt(
899
+ prompt[i : i + 1],
900
+ device,
901
+ num_images_per_prompt=num_images_per_prompt,
902
+ do_classifier_free_guidance=False,
903
+ negative_prompt=None,
904
+ prompt_embeds=prompt_embeds,
905
+ negative_prompt_embeds=None,
906
+ lora_scale=lora_scale,
907
+ clip_skip=self.clip_skip,
908
+ )
909
+
910
+ # 7. Prepare current latent variables
911
+ latents_2 = self.prepare_latents(
912
+ 1,
913
+ num_channels_latents,
914
+ height,
915
+ width,
916
+ prompt_embeds_2.dtype,
917
+ device,
918
+ generator,
919
+ latents,
920
+ )
921
+
922
+ # 8. Interpolate between previous and current prompt embeddings and latents
923
+ inference_embeddings = self.interpolate_embedding(
924
+ start_embedding=prompt_embeds_1,
925
+ end_embedding=prompt_embeds_2,
926
+ num_interpolation_steps=num_interpolation_steps,
927
+ interpolation_type=embedding_interpolation_type,
928
+ )
929
+ inference_latents = self.interpolate_latent(
930
+ start_latent=latents_1,
931
+ end_latent=latents_2,
932
+ num_interpolation_steps=num_interpolation_steps,
933
+ interpolation_type=latent_interpolation_type,
934
+ )
935
+ next_prompt_embeds = inference_embeddings[-1:].detach().clone()
936
+ next_latents = inference_latents[-1:].detach().clone()
937
+ bs = num_interpolation_steps
938
+
939
+ # 9. Perform inference in batches. Note the use of `process_batch_size` to control the batch size
940
+ # of the inference. This is useful for reducing memory usage and can be configured based on the
941
+ # available GPU memory.
942
+ with self.progress_bar(
943
+ total=(bs + process_batch_size - 1) // process_batch_size
944
+ ) as batch_progress_bar:
945
+ for batch_index in range(0, bs, process_batch_size):
946
+ batch_inference_latents = inference_latents[batch_index : batch_index + process_batch_size]
947
+ batch_inference_embedddings = inference_embeddings[
948
+ batch_index : batch_index + process_batch_size
949
+ ]
950
+
951
+ self.scheduler.set_timesteps(
952
+ num_inference_steps, device, original_inference_steps=original_inference_steps
953
+ )
954
+ timesteps = self.scheduler.timesteps
955
+
956
+ current_bs = batch_inference_embedddings.shape[0]
957
+ w = torch.tensor(self.guidance_scale - 1).repeat(current_bs)
958
+ w_embedding = self.get_guidance_scale_embedding(
959
+ w, embedding_dim=self.unet.config.time_cond_proj_dim
960
+ ).to(device=device, dtype=latents_1.dtype)
961
+
962
+ # 10. Perform inference for current batch
963
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
964
+ for index, t in enumerate(timesteps):
965
+ batch_inference_latents = batch_inference_latents.to(batch_inference_embedddings.dtype)
966
+
967
+ # model prediction (v-prediction, eps, x)
968
+ model_pred = self.unet(
969
+ batch_inference_latents,
970
+ t,
971
+ timestep_cond=w_embedding,
972
+ encoder_hidden_states=batch_inference_embedddings,
973
+ cross_attention_kwargs=self.cross_attention_kwargs,
974
+ return_dict=False,
975
+ )[0]
976
+
977
+ # compute the previous noisy sample x_t -> x_t-1
978
+ batch_inference_latents, denoised = self.scheduler.step(
979
+ model_pred, t, batch_inference_latents, **extra_step_kwargs, return_dict=False
980
+ )
981
+ if callback_on_step_end is not None:
982
+ callback_kwargs = {}
983
+ for k in callback_on_step_end_tensor_inputs:
984
+ callback_kwargs[k] = locals()[k]
985
+ callback_outputs = callback_on_step_end(self, index, t, callback_kwargs)
986
+
987
+ batch_inference_latents = callback_outputs.pop("latents", batch_inference_latents)
988
+ batch_inference_embedddings = callback_outputs.pop(
989
+ "prompt_embeds", batch_inference_embedddings
990
+ )
991
+ w_embedding = callback_outputs.pop("w_embedding", w_embedding)
992
+ denoised = callback_outputs.pop("denoised", denoised)
993
+
994
+ # call the callback, if provided
995
+ if index == len(timesteps) - 1 or (
996
+ (index + 1) > num_warmup_steps and (index + 1) % self.scheduler.order == 0
997
+ ):
998
+ progress_bar.update()
999
+ if callback is not None and index % callback_steps == 0:
1000
+ step_idx = index // getattr(self.scheduler, "order", 1)
1001
+ callback(step_idx, t, batch_inference_latents)
1002
+
1003
+ denoised = denoised.to(batch_inference_embedddings.dtype)
1004
+
1005
+ # Note: This is not supported because you would get black images in your latent walk if
1006
+ # NSFW concept is detected
1007
+ # if not output_type == "latent":
1008
+ # image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
1009
+ # image, has_nsfw_concept = self.run_safety_checker(image, device, inference_embeddings.dtype)
1010
+ # else:
1011
+ # image = denoised
1012
+ # has_nsfw_concept = None
1013
+
1014
+ # if has_nsfw_concept is None:
1015
+ # do_denormalize = [True] * image.shape[0]
1016
+ # else:
1017
+ # do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1018
+
1019
+ image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
1020
+ do_denormalize = [True] * image.shape[0]
1021
+ has_nsfw_concept = None
1022
+
1023
+ image = self.image_processor.postprocess(
1024
+ image, output_type=output_type, do_denormalize=do_denormalize
1025
+ )
1026
+ images.append(image)
1027
+
1028
+ batch_progress_bar.update()
1029
+
1030
+ prompt_embeds_1 = next_prompt_embeds
1031
+ latents_1 = next_latents
1032
+
1033
+ prompt_progress_bar.update()
1034
+
1035
+ # 11. Determine what should be returned
1036
+ if output_type == "pil":
1037
+ images = [image for image_list in images for image in image_list]
1038
+ elif output_type == "np":
1039
+ images = np.concatenate(images)
1040
+ elif output_type == "pt":
1041
+ images = torch.cat(images)
1042
+ else:
1043
+ raise ValueError("`output_type` must be one of 'pil', 'np' or 'pt'.")
1044
+
1045
+ # Offload all models
1046
+ self.maybe_free_model_hooks()
1047
+
1048
+ if not return_dict:
1049
+ return (images, has_nsfw_concept)
1050
+
1051
+ return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
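The interpolation pipeline above is distributed as a community pipeline, so it can be loaded by name. A minimal usage sketch, assuming an LCM checkpoint such as `SimianLuo/LCM_Dreamshaper_v7` and that `custom_pipeline="latent_consistency_interpolate"` resolves to this file; argument values are illustrative:

```python
import torch
from diffusers import DiffusionPipeline

# Sketch: load this community pipeline on top of an LCM checkpoint (identifiers are assumptions).
pipe = DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    custom_pipeline="latent_consistency_interpolate",
    torch_dtype=torch.float16,
).to("cuda")

prompts = [
    "a photo of a cat sitting on a windowsill",
    "a photo of a dog sitting on a windowsill",
    "a photo of a fox sitting on a windowsill",
]
result = pipe(
    prompt=prompts,                # at least two prompts are required
    num_inference_steps=4,
    num_interpolation_steps=8,
    process_batch_size=4,          # lower this to reduce peak GPU memory
    generator=torch.Generator("cuda").manual_seed(0),
)
for i, image in enumerate(result.images):
    image.save(f"frame_{i:03d}.png")
```

Each consecutive prompt pair produces `num_interpolation_steps` frames, so the three prompts above yield 16 images in total.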
v0.24.0/latent_consistency_txt2img.py ADDED
@@ -0,0 +1,728 @@
 
1
+ # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
+ # and https://github.com/hojonathanho/diffusion
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Any, Dict, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
25
+
26
+ from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
27
+ from diffusers.configuration_utils import register_to_config
28
+ from diffusers.image_processor import VaeImageProcessor
29
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
30
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
31
+ from diffusers.utils import BaseOutput
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+
37
+ class LatentConsistencyModelPipeline(DiffusionPipeline):
38
+ _optional_components = ["scheduler"]
39
+
40
+ def __init__(
41
+ self,
42
+ vae: AutoencoderKL,
43
+ text_encoder: CLIPTextModel,
44
+ tokenizer: CLIPTokenizer,
45
+ unet: UNet2DConditionModel,
46
+ scheduler: "LCMScheduler",
47
+ safety_checker: StableDiffusionSafetyChecker,
48
+ feature_extractor: CLIPImageProcessor,
49
+ requires_safety_checker: bool = True,
50
+ ):
51
+ super().__init__()
52
+
53
+ scheduler = (
54
+ scheduler
55
+ if scheduler is not None
56
+ else LCMScheduler(
57
+ beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
58
+ )
59
+ )
60
+
61
+ self.register_modules(
62
+ vae=vae,
63
+ text_encoder=text_encoder,
64
+ tokenizer=tokenizer,
65
+ unet=unet,
66
+ scheduler=scheduler,
67
+ safety_checker=safety_checker,
68
+ feature_extractor=feature_extractor,
69
+ )
70
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
71
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
72
+
73
+ def _encode_prompt(
74
+ self,
75
+ prompt,
76
+ device,
77
+ num_images_per_prompt,
78
+ prompt_embeds: None,
79
+ ):
80
+ r"""
81
+ Encodes the prompt into text encoder hidden states.
82
+ Args:
83
+ prompt (`str` or `List[str]`, *optional*):
84
+ prompt to be encoded
85
+ device: (`torch.device`):
86
+ torch device
87
+ num_images_per_prompt (`int`):
88
+ number of images that should be generated per prompt
89
+ prompt_embeds (`torch.FloatTensor`, *optional*):
90
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
91
+ provided, text embeddings will be generated from `prompt` input argument.
92
+ """
93
+
94
+ if prompt is not None and isinstance(prompt, str):
95
+ pass
96
+ elif prompt is not None and isinstance(prompt, list):
97
+ len(prompt)
98
+ else:
99
+ prompt_embeds.shape[0]
100
+
101
+ if prompt_embeds is None:
102
+ text_inputs = self.tokenizer(
103
+ prompt,
104
+ padding="max_length",
105
+ max_length=self.tokenizer.model_max_length,
106
+ truncation=True,
107
+ return_tensors="pt",
108
+ )
109
+ text_input_ids = text_inputs.input_ids
110
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
111
+
112
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
113
+ text_input_ids, untruncated_ids
114
+ ):
115
+ removed_text = self.tokenizer.batch_decode(
116
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
117
+ )
118
+ logger.warning(
119
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
120
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
121
+ )
122
+
123
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
124
+ attention_mask = text_inputs.attention_mask.to(device)
125
+ else:
126
+ attention_mask = None
127
+
128
+ prompt_embeds = self.text_encoder(
129
+ text_input_ids.to(device),
130
+ attention_mask=attention_mask,
131
+ )
132
+ prompt_embeds = prompt_embeds[0]
133
+
134
+ if self.text_encoder is not None:
135
+ prompt_embeds_dtype = self.text_encoder.dtype
136
+ elif self.unet is not None:
137
+ prompt_embeds_dtype = self.unet.dtype
138
+ else:
139
+ prompt_embeds_dtype = prompt_embeds.dtype
140
+
141
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
142
+
143
+ bs_embed, seq_len, _ = prompt_embeds.shape
144
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
145
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
146
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
147
+
148
+ # Don't need to get uncond prompt embedding because of LCM Guided Distillation
149
+ return prompt_embeds
150
+
151
+ def run_safety_checker(self, image, device, dtype):
152
+ if self.safety_checker is None:
153
+ has_nsfw_concept = None
154
+ else:
155
+ if torch.is_tensor(image):
156
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
157
+ else:
158
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
159
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
160
+ image, has_nsfw_concept = self.safety_checker(
161
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
162
+ )
163
+ return image, has_nsfw_concept
164
+
165
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents=None):
166
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
167
+ if latents is None:
168
+ latents = torch.randn(shape, dtype=dtype).to(device)
169
+ else:
170
+ latents = latents.to(device)
171
+ # scale the initial noise by the standard deviation required by the scheduler
172
+ latents = latents * self.scheduler.init_noise_sigma
173
+ return latents
174
+
175
+ def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
176
+ """
177
+ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
178
+ Args:
179
+ w: torch.Tensor: generate embedding vectors for these guidance scale values
180
+ embedding_dim: int: dimension of the embeddings to generate
181
+ dtype: data type of the generated embeddings
182
+ Returns:
183
+ embedding vectors with shape `(len(w), embedding_dim)`
184
+ """
185
+ assert len(w.shape) == 1
186
+ w = w * 1000.0
187
+
188
+ half_dim = embedding_dim // 2
189
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
190
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
191
+ emb = w.to(dtype)[:, None] * emb[None, :]
192
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
193
+ if embedding_dim % 2 == 1: # zero pad
194
+ emb = torch.nn.functional.pad(emb, (0, 1))
195
+ assert emb.shape == (w.shape[0], embedding_dim)
196
+ return emb
197
+
198
+ @torch.no_grad()
199
+ def __call__(
200
+ self,
201
+ prompt: Union[str, List[str]] = None,
202
+ height: Optional[int] = 768,
203
+ width: Optional[int] = 768,
204
+ guidance_scale: float = 7.5,
205
+ num_images_per_prompt: Optional[int] = 1,
206
+ latents: Optional[torch.FloatTensor] = None,
207
+ num_inference_steps: int = 4,
208
+ lcm_origin_steps: int = 50,
209
+ prompt_embeds: Optional[torch.FloatTensor] = None,
210
+ output_type: Optional[str] = "pil",
211
+ return_dict: bool = True,
212
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
213
+ ):
214
+ # 0. Default height and width to unet
215
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
216
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
217
+
218
+ # 2. Define call parameters
219
+ if prompt is not None and isinstance(prompt, str):
220
+ batch_size = 1
221
+ elif prompt is not None and isinstance(prompt, list):
222
+ batch_size = len(prompt)
223
+ else:
224
+ batch_size = prompt_embeds.shape[0]
225
+
226
+ device = self._execution_device
227
+ # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
228
+
229
+ # 3. Encode input prompt
230
+ prompt_embeds = self._encode_prompt(
231
+ prompt,
232
+ device,
233
+ num_images_per_prompt,
234
+ prompt_embeds=prompt_embeds,
235
+ )
236
+
237
+ # 4. Prepare timesteps
238
+ self.scheduler.set_timesteps(num_inference_steps, lcm_origin_steps)
239
+ timesteps = self.scheduler.timesteps
240
+
241
+ # 5. Prepare latent variable
242
+ num_channels_latents = self.unet.config.in_channels
243
+ latents = self.prepare_latents(
244
+ batch_size * num_images_per_prompt,
245
+ num_channels_latents,
246
+ height,
247
+ width,
248
+ prompt_embeds.dtype,
249
+ device,
250
+ latents,
251
+ )
252
+ bs = batch_size * num_images_per_prompt
253
+
254
+ # 6. Get Guidance Scale Embedding
255
+ w = torch.tensor(guidance_scale).repeat(bs)
256
+ w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
257
+
258
+ # 7. LCM MultiStep Sampling Loop:
259
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
260
+ for i, t in enumerate(timesteps):
261
+ ts = torch.full((bs,), t, device=device, dtype=torch.long)
262
+ latents = latents.to(prompt_embeds.dtype)
263
+
264
+ # model prediction (v-prediction, eps, x)
265
+ model_pred = self.unet(
266
+ latents,
267
+ ts,
268
+ timestep_cond=w_embedding,
269
+ encoder_hidden_states=prompt_embeds,
270
+ cross_attention_kwargs=cross_attention_kwargs,
271
+ return_dict=False,
272
+ )[0]
273
+
274
+ # compute the previous noisy sample x_t -> x_t-1
275
+ latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
276
+
277
+ # # call the callback, if provided
278
+ # if i == len(timesteps) - 1:
279
+ progress_bar.update()
280
+
281
+ denoised = denoised.to(prompt_embeds.dtype)
282
+ if not output_type == "latent":
283
+ image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
284
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
285
+ else:
286
+ image = denoised
287
+ has_nsfw_concept = None
288
+
289
+ if has_nsfw_concept is None:
290
+ do_denormalize = [True] * image.shape[0]
291
+ else:
292
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
293
+
294
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
295
+
296
+ if not return_dict:
297
+ return (image, has_nsfw_concept)
298
+
299
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
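# Illustrative usage sketch (model id and loading arguments are assumptions):
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_txt2img"
#   ).to("cuda")
#   image = pipe(prompt="a photo of an astronaut riding a horse", num_inference_steps=4,
#                guidance_scale=8.0, lcm_origin_steps=50).images[0]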
300
+
301
+
302
+ @dataclass
303
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
304
+ class LCMSchedulerOutput(BaseOutput):
305
+ """
306
+ Output class for the scheduler's `step` function output.
307
+ Args:
308
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
309
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
310
+ denoising loop.
311
+ pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
312
+ The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
313
+ `pred_original_sample` can be used to preview progress or for guidance.
314
+ """
315
+
316
+ prev_sample: torch.FloatTensor
317
+ denoised: Optional[torch.FloatTensor] = None
318
+
319
+
320
+ # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
321
+ def betas_for_alpha_bar(
322
+ num_diffusion_timesteps,
323
+ max_beta=0.999,
324
+ alpha_transform_type="cosine",
325
+ ):
326
+ """
327
+ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
328
+ (1-beta) over time from t = [0,1].
329
+ Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
330
+ to that part of the diffusion process.
331
+ Args:
332
+ num_diffusion_timesteps (`int`): the number of betas to produce.
333
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to
334
+ prevent singularities.
335
+ alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
336
+ Choose from `cosine` or `exp`
337
+ Returns:
338
+ betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
339
+ """
340
+ if alpha_transform_type == "cosine":
341
+
342
+ def alpha_bar_fn(t):
343
+ return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
344
+
345
+ elif alpha_transform_type == "exp":
346
+
347
+ def alpha_bar_fn(t):
348
+ return math.exp(t * -12.0)
349
+
350
+ else:
351
+ raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
352
+
353
+ betas = []
354
+ for i in range(num_diffusion_timesteps):
355
+ t1 = i / num_diffusion_timesteps
356
+ t2 = (i + 1) / num_diffusion_timesteps
357
+ betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
358
+ return torch.tensor(betas, dtype=torch.float32)
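# Worked example (following the formula above): with the default "cosine" transform and
# num_diffusion_timesteps = 1000, beta_i = min(1 - alpha_bar((i + 1) / 1000) / alpha_bar(i / 1000), 0.999)
# with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2. The first beta is ~4e-5, and the last one
# hits the max_beta clip of 0.999 because alpha_bar(1.0) = cos(pi / 2) ** 2 = 0.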
359
+
360
+
361
+ def rescale_zero_terminal_snr(betas):
362
+ """
363
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
364
+ Args:
365
+ betas (`torch.FloatTensor`):
366
+ the betas that the scheduler is being initialized with.
367
+ Returns:
368
+ `torch.FloatTensor`: rescaled betas with zero terminal SNR
369
+ """
370
+ # Convert betas to alphas_bar_sqrt
371
+ alphas = 1.0 - betas
372
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
373
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
374
+
375
+ # Store old values.
376
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
377
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
378
+
379
+ # Shift so the last timestep is zero.
380
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
381
+
382
+ # Scale so the first timestep is back to the old value.
383
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
384
+
385
+ # Convert alphas_bar_sqrt to betas
386
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
387
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
388
+ alphas = torch.cat([alphas_bar[0:1], alphas])
389
+ betas = 1 - alphas
390
+
391
+ return betas
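# Quick numerical check of the property above (illustrative sketch): after rescaling, the terminal
# cumulative alpha is (numerically) zero, i.e. the final timestep carries no signal:
#   betas = torch.linspace(0.00085**0.5, 0.012**0.5, 1000) ** 2   # "scaled_linear" schedule as below
#   betas = rescale_zero_terminal_snr(betas)
#   assert torch.cumprod(1.0 - betas, dim=0)[-1] < 1e-6           # zero terminal SNR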
392
+
393
+
394
+ class LCMScheduler(SchedulerMixin, ConfigMixin):
395
+ """
396
+ `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
397
+ non-Markovian guidance.
398
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
399
+ methods the library implements for all schedulers such as loading and saving.
400
+ Args:
401
+ num_train_timesteps (`int`, defaults to 1000):
402
+ The number of diffusion steps to train the model.
403
+ beta_start (`float`, defaults to 0.0001):
404
+ The starting `beta` value of inference.
405
+ beta_end (`float`, defaults to 0.02):
406
+ The final `beta` value.
407
+ beta_schedule (`str`, defaults to `"linear"`):
408
+ The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
409
+ `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
410
+ trained_betas (`np.ndarray`, *optional*):
411
+ Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
412
+ clip_sample (`bool`, defaults to `True`):
413
+ Clip the predicted sample for numerical stability.
414
+ clip_sample_range (`float`, defaults to 1.0):
415
+ The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
416
+ set_alpha_to_one (`bool`, defaults to `True`):
417
+ Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
418
+ there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
419
+ otherwise it uses the alpha value at step 0.
420
+ steps_offset (`int`, defaults to 0):
421
+ An offset added to the inference steps. You can use a combination of `offset=1` and
422
+ `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
423
+ Diffusion.
424
+ prediction_type (`str`, defaults to `epsilon`, *optional*):
425
+ Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
426
+ `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
427
+ Video](https://imagen.research.google/video/paper.pdf) paper).
428
+ thresholding (`bool`, defaults to `False`):
429
+ Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
430
+ as Stable Diffusion.
431
+ dynamic_thresholding_ratio (`float`, defaults to 0.995):
432
+ The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
433
+ sample_max_value (`float`, defaults to 1.0):
434
+ The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
435
+ timestep_spacing (`str`, defaults to `"leading"`):
436
+ The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
437
+ Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
438
+ rescale_betas_zero_snr (`bool`, defaults to `False`):
439
+ Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
440
+ dark samples instead of limiting it to samples with medium brightness. Loosely related to
441
+ [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
442
+ """
443
+
444
+ # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
445
+ order = 1
446
+
447
+ @register_to_config
448
+ def __init__(
449
+ self,
450
+ num_train_timesteps: int = 1000,
451
+ beta_start: float = 0.0001,
452
+ beta_end: float = 0.02,
453
+ beta_schedule: str = "linear",
454
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
455
+ clip_sample: bool = True,
456
+ set_alpha_to_one: bool = True,
457
+ steps_offset: int = 0,
458
+ prediction_type: str = "epsilon",
459
+ thresholding: bool = False,
460
+ dynamic_thresholding_ratio: float = 0.995,
461
+ clip_sample_range: float = 1.0,
462
+ sample_max_value: float = 1.0,
463
+ timestep_spacing: str = "leading",
464
+ rescale_betas_zero_snr: bool = False,
465
+ ):
466
+ if trained_betas is not None:
467
+ self.betas = torch.tensor(trained_betas, dtype=torch.float32)
468
+ elif beta_schedule == "linear":
469
+ self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
470
+ elif beta_schedule == "scaled_linear":
471
+ # this schedule is very specific to the latent diffusion model.
472
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
473
+ elif beta_schedule == "squaredcos_cap_v2":
474
+ # Glide cosine schedule
475
+ self.betas = betas_for_alpha_bar(num_train_timesteps)
476
+ else:
477
+ raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
478
+
479
+ # Rescale for zero SNR
480
+ if rescale_betas_zero_snr:
481
+ self.betas = rescale_zero_terminal_snr(self.betas)
482
+
483
+ self.alphas = 1.0 - self.betas
484
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
485
+
486
+ # At every step in ddim, we are looking into the previous alphas_cumprod
487
+ # For the final step, there is no previous alphas_cumprod because we are already at 0
488
+ # `set_alpha_to_one` decides whether we set this parameter simply to one or
489
+ # whether we use the final alpha of the "non-previous" one.
490
+ self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
491
+
492
+ # standard deviation of the initial noise distribution
493
+ self.init_noise_sigma = 1.0
494
+
495
+ # setable values
496
+ self.num_inference_steps = None
497
+ self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
498
+
499
+ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
500
+ """
501
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
502
+ current timestep.
503
+ Args:
504
+ sample (`torch.FloatTensor`):
505
+ The input sample.
506
+ timestep (`int`, *optional*):
507
+ The current timestep in the diffusion chain.
508
+ Returns:
509
+ `torch.FloatTensor`:
510
+ A scaled input sample.
511
+ """
512
+ return sample
513
+
514
+ def _get_variance(self, timestep, prev_timestep):
515
+ alpha_prod_t = self.alphas_cumprod[timestep]
516
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
517
+ beta_prod_t = 1 - alpha_prod_t
518
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
519
+
520
+ variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
521
+
522
+ return variance
523
+
524
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
525
+ def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
526
+ """
527
+ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
528
+ prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
529
+ s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
530
+ pixels from saturation at each step. We find that dynamic thresholding results in significantly better
531
+ photorealism as well as better image-text alignment, especially when using very large guidance weights."
532
+ https://arxiv.org/abs/2205.11487
533
+ """
534
+ dtype = sample.dtype
535
+ batch_size, channels, height, width = sample.shape
536
+
537
+ if dtype not in (torch.float32, torch.float64):
538
+ sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
539
+
540
+ # Flatten sample for doing quantile calculation along each image
541
+ sample = sample.reshape(batch_size, channels * height * width)
542
+
543
+ abs_sample = sample.abs() # "a certain percentile absolute pixel value"
544
+
545
+ s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
546
+ s = torch.clamp(
547
+ s, min=1, max=self.config.sample_max_value
548
+ ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
549
+
550
+ s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
551
+ sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
552
+
553
+ sample = sample.reshape(batch_size, channels, height, width)
554
+ sample = sample.to(dtype)
555
+
556
+ return sample
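# Worked example (illustrative): for a batch element whose 99.5th-percentile absolute value is
# s = 2.0 (> 1), values are clipped to [-2, 2] and then divided by 2, so the output lies in [-1, 1];
# a well-behaved sample with s <= 1 is clamped to s = 1 and only clipped to [-1, 1]:
#   x = torch.randn(2, 4, 64, 64) * 3.0
#   s = torch.quantile(x.abs().reshape(2, -1), 0.995, dim=1).clamp(min=1.0)[:, None]
#   x_thresholded = (x.reshape(2, -1).clamp(-s, s) / s).reshape_as(x)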
557
+
558
+ def set_timesteps(self, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None):
559
+ """
560
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
561
+ Args:
562
+ num_inference_steps (`int`):
563
+ The number of diffusion steps used when generating samples with a pre-trained model.
564
+ """
565
+
566
+ if num_inference_steps > self.config.num_train_timesteps:
567
+ raise ValueError(
568
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
569
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
570
+ f" maximal {self.config.num_train_timesteps} timesteps."
571
+ )
572
+
573
+ self.num_inference_steps = num_inference_steps
574
+
575
+ # LCM Timesteps Setting: # Linear Spacing
576
+ c = self.config.num_train_timesteps // lcm_origin_steps
577
+ lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1 # LCM Training Steps Schedule
578
+ skipping_step = len(lcm_origin_timesteps) // num_inference_steps
579
+ timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
580
+
581
+ self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
582
+
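A minimal sketch of the timestep subsampling above, assuming 1000 training timesteps, 50 LCM origin steps and 4 inference steps (all illustrative values):

import numpy as np

num_train_timesteps = 1000  # assumed training schedule length
lcm_origin_steps = 50       # assumed LCM training schedule length
num_inference_steps = 4

c = num_train_timesteps // lcm_origin_steps
lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1
skipping_step = len(lcm_origin_timesteps) // num_inference_steps
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]
print(timesteps)  # [999 759 519 279]: a descending, evenly spaced subset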
583
+ def get_scalings_for_boundary_condition_discrete(self, t):
584
+ self.sigma_data = 0.5 # Default: 0.5
585
+
586
+ # Dividing t by 0.1 sharpens the falloff, so c_skip is close to 1 only very near t=0 (almost a delta function).
587
+ c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
588
+ c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
589
+ return c_skip, c_out
590
+
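A quick numeric check of the boundary-condition scalings above with the same sigma_data = 0.5 default: c_skip tends to 1 and c_out to 0 as t approaches 0, so the consistency model passes the sample through unchanged at the boundary.

sigma_data = 0.5
for t in (0, 100, 999):
    c_skip = sigma_data**2 / ((t / 0.1) ** 2 + sigma_data**2)
    c_out = (t / 0.1) / ((t / 0.1) ** 2 + sigma_data**2) ** 0.5
    print(t, round(c_skip, 6), round(c_out, 6))
# t=0 -> c_skip=1.0, c_out=0.0; for large t, c_skip ~ 0 and c_out ~ 1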
591
+ def step(
592
+ self,
593
+ model_output: torch.FloatTensor,
594
+ timeindex: int,
595
+ timestep: int,
596
+ sample: torch.FloatTensor,
597
+ eta: float = 0.0,
598
+ use_clipped_model_output: bool = False,
599
+ generator=None,
600
+ variance_noise: Optional[torch.FloatTensor] = None,
601
+ return_dict: bool = True,
602
+ ) -> Union[LCMSchedulerOutput, Tuple]:
603
+ """
604
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
605
+ process from the learned model outputs (most often the predicted noise).
606
+ Args:
607
+ model_output (`torch.FloatTensor`):
608
+ The direct output from the learned diffusion model.
+ timeindex (`int`):
+ The index of the current timestep in `self.timesteps`, used to look up the previous timestep.
609
+ timestep (`float`):
610
+ The current discrete timestep in the diffusion chain.
611
+ sample (`torch.FloatTensor`):
612
+ A current instance of a sample created by the diffusion process.
613
+ eta (`float`):
614
+ The weight of noise for added noise in diffusion step.
615
+ use_clipped_model_output (`bool`, defaults to `False`):
616
+ If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
617
+ because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
618
+ clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
619
+ `use_clipped_model_output` has no effect.
620
+ generator (`torch.Generator`, *optional*):
621
+ A random number generator.
622
+ variance_noise (`torch.FloatTensor`):
623
+ Alternative to generating noise with `generator` by directly providing the noise for the variance
624
+ itself. Useful for methods such as [`CycleDiffusion`].
625
+ return_dict (`bool`, *optional*, defaults to `True`):
626
+ Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
627
+ Returns:
628
+ [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
629
+ If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
630
+ tuple is returned where the first element is the sample tensor.
631
+ """
632
+ if self.num_inference_steps is None:
633
+ raise ValueError(
634
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
635
+ )
636
+
637
+ # 1. get previous step value
638
+ prev_timeindex = timeindex + 1
639
+ if prev_timeindex < len(self.timesteps):
640
+ prev_timestep = self.timesteps[prev_timeindex]
641
+ else:
642
+ prev_timestep = timestep
643
+
644
+ # 2. compute alphas, betas
645
+ alpha_prod_t = self.alphas_cumprod[timestep]
646
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
647
+
648
+ beta_prod_t = 1 - alpha_prod_t
649
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
650
+
651
+ # 3. Get scalings for boundary conditions
652
+ c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
653
+
654
+ # 4. Different Parameterization:
655
+ parameterization = self.config.prediction_type
656
+
657
+ if parameterization == "epsilon": # noise-prediction
658
+ pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
659
+
660
+ elif parameterization == "sample": # x-prediction
661
+ pred_x0 = model_output
662
+
663
+ elif parameterization == "v_prediction": # v-prediction
664
+ pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
665
+
666
+ # 5. Denoise model output using boundary conditions
667
+ denoised = c_out * pred_x0 + c_skip * sample
668
+
669
+ # 6. Sample z ~ N(0, I), for multi-step inference
670
+ # Noise is not used for one-step sampling.
671
+ if len(self.timesteps) > 1:
672
+ noise = torch.randn(model_output.shape).to(model_output.device)
673
+ prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
674
+ else:
675
+ prev_sample = denoised
676
+
677
+ if not return_dict:
678
+ return (prev_sample, denoised)
679
+
680
+ return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
681
+
682
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
683
+ def add_noise(
684
+ self,
685
+ original_samples: torch.FloatTensor,
686
+ noise: torch.FloatTensor,
687
+ timesteps: torch.IntTensor,
688
+ ) -> torch.FloatTensor:
689
+ # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
690
+ alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
691
+ timesteps = timesteps.to(original_samples.device)
692
+
693
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
694
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
695
+ while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
696
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
697
+
698
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
699
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
700
+ while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
701
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
702
+
703
+ noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
704
+ return noisy_samples
705
+
706
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
707
+ def get_velocity(
708
+ self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
709
+ ) -> torch.FloatTensor:
710
+ # Make sure alphas_cumprod and timestep have same device and dtype as sample
711
+ alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
712
+ timesteps = timesteps.to(sample.device)
713
+
714
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
715
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
716
+ while len(sqrt_alpha_prod.shape) < len(sample.shape):
717
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
718
+
719
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
720
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
721
+ while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
722
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
723
+
724
+ velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
725
+ return velocity
726
+
727
+ def __len__(self):
728
+ return self.config.num_train_timesteps
v0.24.0/llm_grounded_diffusion.py ADDED
@@ -0,0 +1,1015 @@
1
+ # Copyright 2023 Long Lian, the GLIGEN Authors, and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # This is a single file implementation of LMD+. See README.md for examples.
16
+
17
+ import ast
18
+ import gc
19
+ import math
20
+ import warnings
21
+ from collections.abc import Iterable
22
+ from typing import Any, Callable, Dict, List, Optional, Union
23
+
24
+ import torch
25
+ import torch.nn.functional as F
26
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
27
+
28
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
29
+ from diffusers.models.attention import Attention, GatedSelfAttentionDense
30
+ from diffusers.models.attention_processor import AttnProcessor2_0
31
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline
32
+ from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
33
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
34
+ from diffusers.schedulers import KarrasDiffusionSchedulers
35
+ from diffusers.utils import logging, replace_example_docstring
36
+
37
+
38
+ EXAMPLE_DOC_STRING = """
39
+ Examples:
40
+ ```py
41
+ >>> import torch
42
+ >>> from diffusers import DiffusionPipeline
43
+
44
+ >>> pipe = DiffusionPipeline.from_pretrained(
45
+ ... "longlian/lmd_plus",
46
+ ... custom_pipeline="llm_grounded_diffusion",
47
+ ... variant="fp16", torch_dtype=torch.float16
48
+ ... )
49
+ >>> pipe.enable_model_cpu_offload()
50
+
51
+ >>> # Generate an image described by the prompt and
52
+ >>> # insert objects described by text at the region defined by bounding boxes
53
+ >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
54
+ >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]]
55
+ >>> phrases = ["a waterfall", "a modern high speed train"]
56
+
57
+ >>> images = pipe(
58
+ ... prompt=prompt,
59
+ ... phrases=phrases,
60
+ ... boxes=boxes,
61
+ ... gligen_scheduled_sampling_beta=0.4,
62
+ ... output_type="pil",
63
+ ... num_inference_steps=50,
64
+ ... lmd_guidance_kwargs={}
65
+ ... ).images
66
+
67
+ >>> images[0].save("./lmd_plus_generation.jpg")
68
+
69
+ >>> # Generate directly from a text prompt and an LLM response
70
+ >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
71
+ >>> phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(\"""
72
+ [('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])]
73
+ Background prompt: A beautiful forest with fall foliage
74
+ Negative prompt:
75
+ \""")
76
+
77
+ >>> images = pipe(
78
+ ... prompt=prompt,
79
+ ... negative_prompt=neg_prompt,
80
+ ... phrases=phrases,
81
+ ... boxes=boxes,
82
+ ... gligen_scheduled_sampling_beta=0.4,
83
+ ... output_type="pil",
84
+ ... num_inference_steps=50,
85
+ ... lmd_guidance_kwargs={}
86
+ ... ).images
87
+
88
+ >>> images[0].save("./lmd_plus_generation.jpg")
89
+
90
+ images[0]
91
+
92
+ ```
93
+ """
94
+
95
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
96
+
97
+ # All keys in Stable Diffusion models: [('down', 0, 0, 0), ('down', 0, 1, 0), ('down', 1, 0, 0), ('down', 1, 1, 0), ('down', 2, 0, 0), ('down', 2, 1, 0), ('mid', 0, 0, 0), ('up', 1, 0, 0), ('up', 1, 1, 0), ('up', 1, 2, 0), ('up', 2, 0, 0), ('up', 2, 1, 0), ('up', 2, 2, 0), ('up', 3, 0, 0), ('up', 3, 1, 0), ('up', 3, 2, 0)]
98
+ # Note that the first up block is `UpBlock2D` rather than `CrossAttnUpBlock2D` and does not have attention. The last index is always 0 in our case since we have one `BasicTransformerBlock` in each `Transformer2DModel`.
99
+ DEFAULT_GUIDANCE_ATTN_KEYS = [("mid", 0, 0, 0), ("up", 1, 0, 0), ("up", 1, 1, 0), ("up", 1, 2, 0)]
100
+
101
+
102
+ def convert_attn_keys(key):
103
+ """Convert the attention key from tuple format to the torch state format"""
104
+
105
+ if key[0] == "mid":
106
+ assert key[1] == 0, f"mid block only has one block but the index is {key[1]}"
107
+ return f"{key[0]}_block.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor"
108
+
109
+ return f"{key[0]}_blocks.{key[1]}.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor"
110
+
111
+
112
+ DEFAULT_GUIDANCE_ATTN_KEYS = [convert_attn_keys(key) for key in DEFAULT_GUIDANCE_ATTN_KEYS]
113
+
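For reference, applying `convert_attn_keys` to one of the tuples above yields the dotted processor path that is used as the dictionary key (expected output shown as a comment):

# ("up", 1, 0, 0) -> cross-attention processor of up block 1, attention module 0,
# transformer block 0.
print(convert_attn_keys(("up", 1, 0, 0)))
# -> up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor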
114
+
115
+ def scale_proportion(obj_box, H, W):
116
+ # Separately rounding box_w and box_h to allow shift invariant box sizes. Otherwise box sizes may change when both coordinates being rounded end with ".5".
117
+ x_min, y_min = round(obj_box[0] * W), round(obj_box[1] * H)
118
+ box_w, box_h = round((obj_box[2] - obj_box[0]) * W), round((obj_box[3] - obj_box[1]) * H)
119
+ x_max, y_max = x_min + box_w, y_min + box_h
120
+
121
+ x_min, y_min = max(x_min, 0), max(y_min, 0)
122
+ x_max, y_max = min(x_max, W), min(y_max, H)
123
+
124
+ return x_min, y_min, x_max, y_max
125
+
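A small sanity check of `scale_proportion`, assuming a 64x64 latent grid (an illustrative size matching the 512-pixel Stable Diffusion default):

# Normalized [x_min, y_min, x_max, y_max] box mapped onto a 64x64 latent grid.
print(scale_proportion([0.1387, 0.2051, 0.4277, 0.7090], H=64, W=64))
# -> (9, 13, 27, 45): integer pixel bounds, clipped to the grid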
126
+
127
+ # Adapted from the parent class `AttnProcessor2_0`
128
+ class AttnProcessorWithHook(AttnProcessor2_0):
129
+ def __init__(self, attn_processor_key, hidden_size, cross_attention_dim, hook=None, fast_attn=True, enabled=True):
130
+ super().__init__()
131
+ self.attn_processor_key = attn_processor_key
132
+ self.hidden_size = hidden_size
133
+ self.cross_attention_dim = cross_attention_dim
134
+ self.hook = hook
135
+ self.fast_attn = fast_attn
136
+ self.enabled = enabled
137
+
138
+ def __call__(
139
+ self,
140
+ attn: Attention,
141
+ hidden_states,
142
+ encoder_hidden_states=None,
143
+ attention_mask=None,
144
+ temb=None,
145
+ scale: float = 1.0,
146
+ ):
147
+ residual = hidden_states
148
+
149
+ if attn.spatial_norm is not None:
150
+ hidden_states = attn.spatial_norm(hidden_states, temb)
151
+
152
+ input_ndim = hidden_states.ndim
153
+
154
+ if input_ndim == 4:
155
+ batch_size, channel, height, width = hidden_states.shape
156
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
157
+
158
+ batch_size, sequence_length, _ = (
159
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
160
+ )
161
+
162
+ if attention_mask is not None:
163
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
164
+
165
+ if attn.group_norm is not None:
166
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
167
+
168
+ query = attn.to_q(hidden_states, scale=scale)
169
+
170
+ if encoder_hidden_states is None:
171
+ encoder_hidden_states = hidden_states
172
+ elif attn.norm_cross:
173
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
174
+
175
+ key = attn.to_k(encoder_hidden_states, scale=scale)
176
+ value = attn.to_v(encoder_hidden_states, scale=scale)
177
+
178
+ inner_dim = key.shape[-1]
179
+ head_dim = inner_dim // attn.heads
180
+
181
+ if (self.hook is not None and self.enabled) or not self.fast_attn:
182
+ query_batch_dim = attn.head_to_batch_dim(query)
183
+ key_batch_dim = attn.head_to_batch_dim(key)
184
+ value_batch_dim = attn.head_to_batch_dim(value)
185
+ attention_probs = attn.get_attention_scores(query_batch_dim, key_batch_dim, attention_mask)
186
+
187
+ if self.hook is not None and self.enabled:
188
+ # Call the hook with query, key, value, and attention maps
189
+ self.hook(self.attn_processor_key, query_batch_dim, key_batch_dim, value_batch_dim, attention_probs)
190
+
191
+ if self.fast_attn:
192
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
193
+
194
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
195
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
196
+
197
+ if attention_mask is not None:
198
+ # scaled_dot_product_attention expects attention_mask shape to be
199
+ # (batch, heads, source_length, target_length)
200
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
201
+
202
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
203
+ # TODO: add support for attn.scale when we move to Torch 2.1
204
+ hidden_states = F.scaled_dot_product_attention(
205
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
206
+ )
207
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
208
+ hidden_states = hidden_states.to(query.dtype)
209
+ else:
210
+ hidden_states = torch.bmm(attention_probs, value)
211
+ hidden_states = attn.batch_to_head_dim(hidden_states)
212
+
213
+ # linear proj
214
+ hidden_states = attn.to_out[0](hidden_states, scale=scale)
215
+ # dropout
216
+ hidden_states = attn.to_out[1](hidden_states)
217
+
218
+ if input_ndim == 4:
219
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
220
+
221
+ if attn.residual_connection:
222
+ hidden_states = hidden_states + residual
223
+
224
+ hidden_states = hidden_states / attn.rescale_output_factor
225
+
226
+ return hidden_states
227
+
228
+
229
+ class LLMGroundedDiffusionPipeline(StableDiffusionPipeline):
230
+ r"""
231
+ Pipeline for layout-grounded text-to-image generation using LLM-grounded Diffusion (LMD+): https://arxiv.org/pdf/2305.13655.pdf.
232
+
233
+ This model inherits from [`StableDiffusionPipeline`] and aims at implementing the pipeline with minimal modifications. Check the superclass documentation for the generic methods
234
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
235
+
236
+ This is a simplified implementation that does not perform latent or attention transfer from single object generation to overall generation. The final image is generated directly with attention and adapters control.
237
+
238
+ Args:
239
+ vae ([`AutoencoderKL`]):
240
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
241
+ text_encoder ([`~transformers.CLIPTextModel`]):
242
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
243
+ tokenizer ([`~transformers.CLIPTokenizer`]):
244
+ A `CLIPTokenizer` to tokenize text.
245
+ unet ([`UNet2DConditionModel`]):
246
+ A `UNet2DConditionModel` to denoise the encoded image latents.
247
+ scheduler ([`SchedulerMixin`]):
248
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
249
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
250
+ safety_checker ([`StableDiffusionSafetyChecker`]):
251
+ Classification module that estimates whether generated images could be considered offensive or harmful.
252
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
253
+ about a model's potential harms.
254
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
255
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
256
+ requires_safety_checker (bool):
257
+ Whether a safety checker is needed for this pipeline.
258
+ """
259
+
260
+ objects_text = "Objects: "
261
+ bg_prompt_text = "Background prompt: "
262
+ bg_prompt_text_no_trailing_space = bg_prompt_text.rstrip()
263
+ neg_prompt_text = "Negative prompt: "
264
+ neg_prompt_text_no_trailing_space = neg_prompt_text.rstrip()
265
+
266
+ def __init__(
267
+ self,
268
+ vae: AutoencoderKL,
269
+ text_encoder: CLIPTextModel,
270
+ tokenizer: CLIPTokenizer,
271
+ unet: UNet2DConditionModel,
272
+ scheduler: KarrasDiffusionSchedulers,
273
+ safety_checker: StableDiffusionSafetyChecker,
274
+ feature_extractor: CLIPImageProcessor,
275
+ requires_safety_checker: bool = True,
276
+ ):
277
+ super().__init__(
278
+ vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker
279
+ )
280
+
281
+ self.register_attn_hooks(unet)
282
+ self._saved_attn = None
283
+
284
+ def attn_hook(self, name, query, key, value, attention_probs):
285
+ if name in DEFAULT_GUIDANCE_ATTN_KEYS:
286
+ self._saved_attn[name] = attention_probs
287
+
288
+ @classmethod
289
+ def convert_box(cls, box, height, width):
290
+ # box: x, y, w, h (in 512 format) -> x_min, y_min, x_max, y_max
291
+ x_min, y_min = box[0] / width, box[1] / height
292
+ w_box, h_box = box[2] / width, box[3] / height
293
+
294
+ x_max, y_max = x_min + w_box, y_min + h_box
295
+
296
+ return x_min, y_min, x_max, y_max
297
+
298
+ @classmethod
299
+ def _parse_response_with_negative(cls, text):
300
+ if not text:
301
+ raise ValueError("LLM response is empty")
302
+
303
+ if cls.objects_text in text:
304
+ text = text.split(cls.objects_text)[1]
305
+
306
+ text_split = text.split(cls.bg_prompt_text_no_trailing_space)
307
+ if len(text_split) == 2:
308
+ gen_boxes, text_rem = text_split
309
+ else:
310
+ raise ValueError(f"LLM response is incomplete: {text}")
311
+
312
+ text_split = text_rem.split(cls.neg_prompt_text_no_trailing_space)
313
+
314
+ if len(text_split) == 2:
315
+ bg_prompt, neg_prompt = text_split
316
+ else:
317
+ raise ValueError(f"LLM response is incomplete: {text}")
318
+
319
+ try:
320
+ gen_boxes = ast.literal_eval(gen_boxes)
321
+ except SyntaxError as e:
322
+ # Sometimes the response is in plain text
323
+ if "No objects" in gen_boxes or gen_boxes.strip() == "":
324
+ gen_boxes = []
325
+ else:
326
+ raise e
327
+ bg_prompt = bg_prompt.strip()
328
+ neg_prompt = neg_prompt.strip()
329
+
330
+ # LLM may return "None" to mean no negative prompt provided.
331
+ if neg_prompt == "None":
332
+ neg_prompt = ""
333
+
334
+ return gen_boxes, bg_prompt, neg_prompt
335
+
336
+ @classmethod
337
+ def parse_llm_response(cls, response, canvas_height=512, canvas_width=512):
338
+ # Infer from spec
339
+ gen_boxes, bg_prompt, neg_prompt = cls._parse_response_with_negative(text=response)
340
+
341
+ gen_boxes = sorted(gen_boxes, key=lambda gen_box: gen_box[0])
342
+
343
+ phrases = [name for name, _ in gen_boxes]
344
+ boxes = [cls.convert_box(box, height=canvas_height, width=canvas_width) for _, box in gen_boxes]
345
+
346
+ return phrases, boxes, bg_prompt, neg_prompt
347
+
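A rough usage sketch of `parse_llm_response` on the same illustrative LLM output as in the docstring example; it assumes the pipeline class defined below has been imported (or loaded as a custom pipeline):

response = """[('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])]
Background prompt: A beautiful forest with fall foliage
Negative prompt:"""

phrases, boxes, bg_prompt, neg_prompt = LLMGroundedDiffusionPipeline.parse_llm_response(response)
print(phrases)    # ['a modern high speed train', 'a waterfall'] (sorted by phrase)
print(boxes[0])   # normalized (x_min, y_min, x_max, y_max) in [0, 1]
print(bg_prompt)  # A beautiful forest with fall foliage
print(repr(neg_prompt))  # '' (the LLM provided no negative prompt)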
348
+ def check_inputs(
349
+ self,
350
+ prompt,
351
+ height,
352
+ width,
353
+ callback_steps,
354
+ phrases,
355
+ boxes,
356
+ negative_prompt=None,
357
+ prompt_embeds=None,
358
+ negative_prompt_embeds=None,
359
+ phrase_indices=None,
360
+ ):
361
+ if height % 8 != 0 or width % 8 != 0:
362
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
363
+
364
+ if (callback_steps is None) or (
365
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
366
+ ):
367
+ raise ValueError(
368
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
369
+ f" {type(callback_steps)}."
370
+ )
371
+
372
+ if prompt is not None and prompt_embeds is not None:
373
+ raise ValueError(
374
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
375
+ " only forward one of the two."
376
+ )
377
+ elif prompt is None and prompt_embeds is None:
378
+ raise ValueError(
379
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
380
+ )
381
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
382
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
383
+ elif prompt is None and phrase_indices is None:
384
+ raise ValueError("If the prompt is None, the phrase_indices cannot be None")
385
+
386
+ if negative_prompt is not None and negative_prompt_embeds is not None:
387
+ raise ValueError(
388
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
389
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
390
+ )
391
+
392
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
393
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
394
+ raise ValueError(
395
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
396
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
397
+ f" {negative_prompt_embeds.shape}."
398
+ )
399
+
400
+ if len(phrases) != len(boxes):
401
+ raise ValueError(
402
+ "length of `phrases` and `boxes` has to be same, but"
403
+ f" got: `phrases` {len(phrases)} != `boxes` {len(boxes)}"
404
+ )
405
+
406
+ def register_attn_hooks(self, unet):
407
+ """Registering hooks to obtain the attention maps for guidance"""
408
+
409
+ attn_procs = {}
410
+
411
+ for name in unet.attn_processors.keys():
412
+ # Only obtain the queries and keys from cross-attention
413
+ if name.endswith("attn1.processor") or name.endswith("fuser.attn.processor"):
414
+ # Keep the same attn_processors for self-attention (no hooks for self-attention)
415
+ attn_procs[name] = unet.attn_processors[name]
416
+ continue
417
+
418
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
419
+
420
+ if name.startswith("mid_block"):
421
+ hidden_size = unet.config.block_out_channels[-1]
422
+ elif name.startswith("up_blocks"):
423
+ block_id = int(name[len("up_blocks.")])
424
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
425
+ elif name.startswith("down_blocks"):
426
+ block_id = int(name[len("down_blocks.")])
427
+ hidden_size = unet.config.block_out_channels[block_id]
428
+
429
+ attn_procs[name] = AttnProcessorWithHook(
430
+ attn_processor_key=name,
431
+ hidden_size=hidden_size,
432
+ cross_attention_dim=cross_attention_dim,
433
+ hook=self.attn_hook,
434
+ fast_attn=True,
435
+ # Not enabled by default
436
+ enabled=False,
437
+ )
438
+
439
+ unet.set_attn_processor(attn_procs)
440
+
441
+ def enable_fuser(self, enabled=True):
442
+ for module in self.unet.modules():
443
+ if isinstance(module, GatedSelfAttentionDense):
444
+ module.enabled = enabled
445
+
446
+ def enable_attn_hook(self, enabled=True):
447
+ for module in self.unet.attn_processors.values():
448
+ if isinstance(module, AttnProcessorWithHook):
449
+ module.enabled = enabled
450
+
451
+ def get_token_map(self, prompt, padding="do_not_pad", verbose=False):
452
+ """Get a list of mapping: prompt index to str (prompt in a list of token str)"""
453
+ fg_prompt_tokens = self.tokenizer([prompt], padding=padding, max_length=77, return_tensors="np")
454
+ input_ids = fg_prompt_tokens["input_ids"][0]
455
+
456
+ token_map = []
457
+ for ind, item in enumerate(input_ids.tolist()):
458
+ token = self.tokenizer._convert_id_to_token(item)
459
+
460
+ if verbose:
461
+ logger.info(f"{ind}, {token} ({item})")
462
+
463
+ token_map.append(token)
464
+
465
+ return token_map
466
+
467
+ def get_phrase_indices(self, prompt, phrases, token_map=None, add_suffix_if_not_found=False, verbose=False):
468
+ for obj in phrases:
469
+ # Suffix the prompt with object name for attention guidance if object is not in the prompt, using "|" to separate the prompt and the suffix
470
+ if obj not in prompt:
471
+ prompt += "| " + obj
472
+
473
+ if token_map is None:
474
+ # We allow using a pre-computed token map.
475
+ token_map = self.get_token_map(prompt=prompt, padding="do_not_pad", verbose=verbose)
476
+ token_map_str = " ".join(token_map)
477
+
478
+ phrase_indices = []
479
+
480
+ for obj in phrases:
481
+ phrase_token_map = self.get_token_map(prompt=obj, padding="do_not_pad", verbose=verbose)
482
+ # Remove <bos> and <eos> in substr
483
+ phrase_token_map = phrase_token_map[1:-1]
484
+ phrase_token_map_len = len(phrase_token_map)
485
+ phrase_token_map_str = " ".join(phrase_token_map)
486
+
487
+ if verbose:
488
+ logger.info("Full str:", token_map_str, "Substr:", phrase_token_map_str, "Phrase:", phrases)
489
+
490
+ # Count the number of token before substr
491
+ # The substring comes with a trailing space that needs to be removed by minus one in the index.
492
+ obj_first_index = len(token_map_str[: token_map_str.index(phrase_token_map_str) - 1].split(" "))
493
+
494
+ obj_position = list(range(obj_first_index, obj_first_index + phrase_token_map_len))
495
+ phrase_indices.append(obj_position)
496
+
497
+ if add_suffix_if_not_found:
498
+ return phrase_indices, prompt
499
+
500
+ return phrase_indices
501
+
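A simplified standalone sketch of the phrase-index lookup above, matching on token ids rather than on the joined token strings that `get_phrase_indices` uses; the tokenizer checkpoint is an assumption (the pipeline uses its own `self.tokenizer`):

from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")  # assumed checkpoint

prompt = "a waterfall and a modern high speed train in a forest"
phrase = "a waterfall"
prompt_ids = tokenizer(prompt, padding="do_not_pad").input_ids
phrase_ids = tokenizer(phrase, padding="do_not_pad").input_ids[1:-1]  # drop <bos>/<eos>

# Find the first occurrence of the phrase's token ids inside the prompt's ids.
for start in range(len(prompt_ids) - len(phrase_ids) + 1):
    if prompt_ids[start : start + len(phrase_ids)] == phrase_ids:
        print(list(range(start, start + len(phrase_ids))))  # indices of the phrase tokens
        break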
502
+ def add_ca_loss_per_attn_map_to_loss(
503
+ self,
504
+ loss,
505
+ attn_map,
506
+ object_number,
507
+ bboxes,
508
+ phrase_indices,
509
+ fg_top_p=0.2,
510
+ bg_top_p=0.2,
511
+ fg_weight=1.0,
512
+ bg_weight=1.0,
513
+ ):
514
+ # b is the number of heads, not batch
515
+ b, i, j = attn_map.shape
516
+ H = W = int(math.sqrt(i))
517
+ for obj_idx in range(object_number):
518
+ obj_loss = 0
519
+ mask = torch.zeros(size=(H, W), device="cuda")
520
+ obj_boxes = bboxes[obj_idx]
521
+
522
+ # We support two level (one box per phrase) and three level (multiple boxes per phrase)
523
+ if not isinstance(obj_boxes[0], Iterable):
524
+ obj_boxes = [obj_boxes]
525
+
526
+ for obj_box in obj_boxes:
527
+ # x_min, y_min, x_max, y_max = int(obj_box[0] * W), int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H)
528
+ x_min, y_min, x_max, y_max = scale_proportion(obj_box, H=H, W=W)
529
+ mask[y_min:y_max, x_min:x_max] = 1
530
+
531
+ for obj_position in phrase_indices[obj_idx]:
532
+ # Could potentially optimize to compute this for loop in batch.
533
+ # Could crop the ref cross attention before saving to save memory.
534
+
535
+ ca_map_obj = attn_map[:, :, obj_position].reshape(b, H, W)
536
+
537
+ # shape: (b, H * W)
538
+ ca_map_obj = attn_map[:, :, obj_position] # .reshape(b, H, W)
539
+ k_fg = (mask.sum() * fg_top_p).long().clamp_(min=1)
540
+ k_bg = ((1 - mask).sum() * bg_top_p).long().clamp_(min=1)
541
+
542
+ mask_1d = mask.view(1, -1)
543
+
544
+ # Max-based loss function
545
+
546
+ # Take the topk over spatial dimension, and then take the sum over heads dim
547
+ # The mean is over k_fg and k_bg dimension, so we don't need to sum and divide on our own.
548
+ obj_loss += (1 - (ca_map_obj * mask_1d).topk(k=k_fg).values.mean(dim=1)).sum(dim=0) * fg_weight
549
+ obj_loss += ((ca_map_obj * (1 - mask_1d)).topk(k=k_bg).values.mean(dim=1)).sum(dim=0) * bg_weight
550
+
551
+ loss += obj_loss / len(phrase_indices[obj_idx])
552
+
553
+ return loss
554
+
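A tiny CPU-only sketch of the top-k foreground/background attention loss above, on a random attention map for a single text token; the shapes and the 0.2 top-p values mirror the defaults but are otherwise illustrative:

import math

import torch

b, hw = 8, 64                   # 8 heads over an 8x8 latent map
H = W = int(math.sqrt(hw))
ca_map_obj = torch.rand(b, hw)  # cross-attention for one token of one object
mask = torch.zeros(H, W)
mask[2:6, 2:6] = 1              # the object's box
mask_1d = mask.view(1, -1)

k_fg = int((mask.sum() * 0.2).long().clamp(min=1))
k_bg = int(((1 - mask).sum() * 0.2).long().clamp(min=1))

# The loss is low when attention is high inside the box and low outside it.
loss = (1 - (ca_map_obj * mask_1d).topk(k=k_fg).values.mean(dim=1)).sum(dim=0)
loss += ((ca_map_obj * (1 - mask_1d)).topk(k=k_bg).values.mean(dim=1)).sum(dim=0)
print(loss)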
555
+ def compute_ca_loss(self, saved_attn, bboxes, phrase_indices, guidance_attn_keys, verbose=False, **kwargs):
556
+ """
557
+ The `saved_attn` is supposed to be passed to `save_attn_to_dict` in `cross_attention_kwargs` prior to computing the loss.
558
+ `AttnProcessor` will put attention maps into the `save_attn_to_dict`.
559
+
560
+ `index` is the timestep.
561
+ `ref_ca_word_token_only`: This has precedence over `ref_ca_last_token_only` (i.e., if both are enabled, we take the token from word rather than the last token).
562
+ `ref_ca_last_token_only`: `ref_ca_saved_attn` comes from the attention map of the last token of the phrase in single object generation, so we apply it only to the last token of the phrase in overall generation if this is set to True. If set to False, `ref_ca_saved_attn` will be applied to all the text tokens.
563
+ """
564
+ loss = torch.tensor(0).float().cuda()
565
+ object_number = len(bboxes)
566
+ if object_number == 0:
567
+ return loss
568
+
569
+ for attn_key in guidance_attn_keys:
570
+ # We only have 1 cross attention for mid.
571
+
572
+ attn_map_integrated = saved_attn[attn_key]
573
+ if not attn_map_integrated.is_cuda:
574
+ attn_map_integrated = attn_map_integrated.cuda()
575
+ # Example dimension: [20, 64, 77]
576
+ attn_map = attn_map_integrated.squeeze(dim=0)
577
+
578
+ loss = self.add_ca_loss_per_attn_map_to_loss(
579
+ loss, attn_map, object_number, bboxes, phrase_indices, **kwargs
580
+ )
581
+
582
+ num_attn = len(guidance_attn_keys)
583
+
584
+ if num_attn > 0:
585
+ loss = loss / (object_number * num_attn)
586
+
587
+ return loss
588
+
589
+ @torch.no_grad()
590
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
591
+ def __call__(
592
+ self,
593
+ prompt: Union[str, List[str]] = None,
594
+ height: Optional[int] = None,
595
+ width: Optional[int] = None,
596
+ num_inference_steps: int = 50,
597
+ guidance_scale: float = 7.5,
598
+ gligen_scheduled_sampling_beta: float = 0.3,
599
+ phrases: List[str] = None,
600
+ boxes: List[List[float]] = None,
601
+ negative_prompt: Optional[Union[str, List[str]]] = None,
602
+ num_images_per_prompt: Optional[int] = 1,
603
+ eta: float = 0.0,
604
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
605
+ latents: Optional[torch.FloatTensor] = None,
606
+ prompt_embeds: Optional[torch.FloatTensor] = None,
607
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
608
+ output_type: Optional[str] = "pil",
609
+ return_dict: bool = True,
610
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
611
+ callback_steps: int = 1,
612
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
613
+ clip_skip: Optional[int] = None,
614
+ lmd_guidance_kwargs: Optional[Dict[str, Any]] = {},
615
+ phrase_indices: Optional[List[int]] = None,
616
+ ):
617
+ r"""
618
+ The call function to the pipeline for generation.
619
+
620
+ Args:
621
+ prompt (`str` or `List[str]`, *optional*):
622
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
623
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
624
+ The height in pixels of the generated image.
625
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
626
+ The width in pixels of the generated image.
627
+ num_inference_steps (`int`, *optional*, defaults to 50):
628
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
629
+ expense of slower inference.
630
+ guidance_scale (`float`, *optional*, defaults to 7.5):
631
+ A higher guidance scale value encourages the model to generate images closely linked to the text
632
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
633
+ phrases (`List[str]`):
634
+ The phrases to guide what to include in each of the regions defined by the corresponding
635
+ `boxes`. There should only be one phrase per bounding box.
636
+ boxes (`List[List[float]]`):
637
+ The bounding boxes that identify rectangular regions of the image that are going to be filled with the
638
+ content described by the corresponding `phrases`. Each rectangular box is defined as a
639
+ `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1].
640
+ gligen_scheduled_sampling_beta (`float`, defaults to 0.3):
641
+ Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image
642
+ Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for
643
+ scheduled sampling during inference for improved quality and controllability.
644
+ negative_prompt (`str` or `List[str]`, *optional*):
645
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
646
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
647
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
648
+ The number of images to generate per prompt.
649
+ eta (`float`, *optional*, defaults to 0.0):
650
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
651
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
652
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
653
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
654
+ generation deterministic.
655
+ latents (`torch.FloatTensor`, *optional*):
656
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
657
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
658
+ tensor is generated by sampling using the supplied random `generator`.
659
+ prompt_embeds (`torch.FloatTensor`, *optional*):
660
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
661
+ provided, text embeddings are generated from the `prompt` input argument.
662
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
663
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
664
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
665
+ output_type (`str`, *optional*, defaults to `"pil"`):
666
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
667
+ return_dict (`bool`, *optional*, defaults to `True`):
668
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
669
+ plain tuple.
670
+ callback (`Callable`, *optional*):
671
+ A function that calls every `callback_steps` steps during inference. The function is called with the
672
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
673
+ callback_steps (`int`, *optional*, defaults to 1):
674
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
675
+ every step.
676
+ cross_attention_kwargs (`dict`, *optional*):
677
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
678
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
679
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
680
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
681
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
682
+ using zero terminal SNR.
683
+ clip_skip (`int`, *optional*):
684
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
685
+ the output of the pre-final layer will be used for computing the prompt embeddings.
686
+ lmd_guidance_kwargs (`dict`, *optional*):
687
+ A kwargs dictionary that if specified is passed along to `latent_lmd_guidance` function. Useful keys include `loss_scale` (the guidance strength), `loss_threshold` (when loss is lower than this value, the guidance is not applied anymore), `max_iter` (the number of iterations of guidance for each step), and `guidance_timesteps` (the number of diffusion timesteps to apply guidance on). See `latent_lmd_guidance` for implementation details.
688
+ phrase_indices (`list` of `list`, *optional*): The indices of the tokens of each phrase in the overall prompt. If omitted, the pipeline will match the first token subsequence. The pipeline will append the missing phrases to the end of the prompt by default.
689
+ Examples:
690
+
691
+ Returns:
692
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
693
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
694
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
695
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
696
+ "not-safe-for-work" (nsfw) content.
697
+ """
698
+ # 0. Default height and width to unet
699
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
700
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
701
+
702
+ # 1. Check inputs. Raise error if not correct
703
+ self.check_inputs(
704
+ prompt,
705
+ height,
706
+ width,
707
+ callback_steps,
708
+ phrases,
709
+ boxes,
710
+ negative_prompt,
711
+ prompt_embeds,
712
+ negative_prompt_embeds,
713
+ phrase_indices,
714
+ )
715
+
716
+ # 2. Define call parameters
717
+ if prompt is not None and isinstance(prompt, str):
718
+ batch_size = 1
719
+ if phrase_indices is None:
720
+ phrase_indices, prompt = self.get_phrase_indices(prompt, phrases, add_suffix_if_not_found=True)
721
+ elif prompt is not None and isinstance(prompt, list):
722
+ batch_size = len(prompt)
723
+ if phrase_indices is None:
724
+ phrase_indices = []
725
+ prompt_parsed = []
726
+ for prompt_item in prompt:
727
+ phrase_indices_parsed_item, prompt_parsed_item = self.get_phrase_indices(
728
+ prompt_item, add_suffix_if_not_found=True
729
+ )
730
+ phrase_indices.append(phrase_indices_parsed_item)
731
+ prompt_parsed.append(prompt_parsed_item)
732
+ prompt = prompt_parsed
733
+ else:
734
+ batch_size = prompt_embeds.shape[0]
735
+
736
+ device = self._execution_device
737
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
738
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
739
+ # corresponds to doing no classifier free guidance.
740
+ do_classifier_free_guidance = guidance_scale > 1.0
741
+
742
+ # 3. Encode input prompt
743
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
744
+ prompt,
745
+ device,
746
+ num_images_per_prompt,
747
+ do_classifier_free_guidance,
748
+ negative_prompt,
749
+ prompt_embeds=prompt_embeds,
750
+ negative_prompt_embeds=negative_prompt_embeds,
751
+ clip_skip=clip_skip,
752
+ )
753
+
754
+ cond_prompt_embeds = prompt_embeds
755
+
756
+ # For classifier free guidance, we need to do two forward passes.
757
+ # Here we concatenate the unconditional and text embeddings into a single batch
758
+ # to avoid doing two forward passes
759
+ if do_classifier_free_guidance:
760
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
761
+
762
+ # 4. Prepare timesteps
763
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
764
+ timesteps = self.scheduler.timesteps
765
+
766
+ # 5. Prepare latent variables
767
+ num_channels_latents = self.unet.config.in_channels
768
+ latents = self.prepare_latents(
769
+ batch_size * num_images_per_prompt,
770
+ num_channels_latents,
771
+ height,
772
+ width,
773
+ prompt_embeds.dtype,
774
+ device,
775
+ generator,
776
+ latents,
777
+ )
778
+
779
+ # 5.1 Prepare GLIGEN variables
780
+ max_objs = 30
781
+ if len(boxes) > max_objs:
782
+ warnings.warn(
783
+ f"More that {max_objs} objects found. Only first {max_objs} objects will be processed.",
784
+ FutureWarning,
785
+ )
786
+ phrases = phrases[:max_objs]
787
+ boxes = boxes[:max_objs]
788
+
789
+ n_objs = len(boxes)
790
+ if n_objs:
791
+ # prepare batched input to the PositionNet (boxes, phrases, mask)
792
+ # Get tokens for phrases from pre-trained CLIPTokenizer
793
+ tokenizer_inputs = self.tokenizer(phrases, padding=True, return_tensors="pt").to(device)
794
+ # For the token, we use the same pre-trained text encoder
795
+ # to obtain its text feature
796
+ _text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output
797
+
798
+ # For each entity, described in phrases, is denoted with a bounding box,
799
+ # we represent the location information as (xmin,ymin,xmax,ymax)
800
+ cond_boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype)
801
+ if n_objs:
802
+ cond_boxes[:n_objs] = torch.tensor(boxes)
803
+ text_embeddings = torch.zeros(
804
+ max_objs, self.unet.config.cross_attention_dim, device=device, dtype=self.text_encoder.dtype
805
+ )
806
+ if n_objs:
807
+ text_embeddings[:n_objs] = _text_embeddings
808
+ # Generate a mask for each object that is entity described by phrases
809
+ masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype)
810
+ masks[:n_objs] = 1
811
+
812
+ repeat_batch = batch_size * num_images_per_prompt
813
+ cond_boxes = cond_boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
814
+ text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
815
+ masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone()
816
+ if do_classifier_free_guidance:
817
+ repeat_batch = repeat_batch * 2
818
+ cond_boxes = torch.cat([cond_boxes] * 2)
819
+ text_embeddings = torch.cat([text_embeddings] * 2)
820
+ masks = torch.cat([masks] * 2)
821
+ masks[: repeat_batch // 2] = 0
822
+ if cross_attention_kwargs is None:
823
+ cross_attention_kwargs = {}
824
+ cross_attention_kwargs["gligen"] = {
825
+ "boxes": cond_boxes,
826
+ "positive_embeddings": text_embeddings,
827
+ "masks": masks,
828
+ }
829
+
830
+ num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps))
831
+ self.enable_fuser(True)
832
+
833
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
834
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
835
+
836
+ loss_attn = torch.tensor(10000.0)
837
+
838
+ # 7. Denoising loop
839
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
840
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
841
+ for i, t in enumerate(timesteps):
842
+ # Scheduled sampling
843
+ if i == num_grounding_steps:
844
+ self.enable_fuser(False)
845
+
846
+ if latents.shape[1] != 4:
847
+ latents = torch.randn_like(latents[:, :4])
848
+
849
+ # 7.1 Perform LMD guidance
850
+ if boxes:
851
+ latents, loss_attn = self.latent_lmd_guidance(
852
+ cond_prompt_embeds,
853
+ index=i,
854
+ boxes=boxes,
855
+ phrase_indices=phrase_indices,
856
+ t=t,
857
+ latents=latents,
858
+ loss=loss_attn,
859
+ **lmd_guidance_kwargs,
860
+ )
861
+
862
+ # expand the latents if we are doing classifier free guidance
863
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
864
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
865
+
866
+ # predict the noise residual
867
+ noise_pred = self.unet(
868
+ latent_model_input,
869
+ t,
870
+ encoder_hidden_states=prompt_embeds,
871
+ cross_attention_kwargs=cross_attention_kwargs,
872
+ ).sample
873
+
874
+ # perform guidance
875
+ if do_classifier_free_guidance:
876
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
877
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
878
+
879
+ # compute the previous noisy sample x_t -> x_t-1
880
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
881
+
882
+ # call the callback, if provided
883
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
884
+ progress_bar.update()
885
+ if callback is not None and i % callback_steps == 0:
886
+ step_idx = i // getattr(self.scheduler, "order", 1)
887
+ callback(step_idx, t, latents)
888
+
889
+ if not output_type == "latent":
890
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
891
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
892
+ else:
893
+ image = latents
894
+ has_nsfw_concept = None
895
+
896
+ if has_nsfw_concept is None:
897
+ do_denormalize = [True] * image.shape[0]
898
+ else:
899
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
900
+
901
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
902
+
903
+ # Offload last model to CPU
904
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
905
+ self.final_offload_hook.offload()
906
+
907
+ if not return_dict:
908
+ return (image, has_nsfw_concept)
909
+
910
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
911
+
912
+ @torch.set_grad_enabled(True)
913
+ def latent_lmd_guidance(
914
+ self,
915
+ cond_embeddings,
916
+ index,
917
+ boxes,
918
+ phrase_indices,
919
+ t,
920
+ latents,
921
+ loss,
922
+ *,
923
+ loss_scale=20,
924
+ loss_threshold=5.0,
925
+ max_iter=[3] * 5 + [2] * 5 + [1] * 5,
926
+ guidance_timesteps=15,
927
+ cross_attention_kwargs=None,
928
+ guidance_attn_keys=DEFAULT_GUIDANCE_ATTN_KEYS,
929
+ verbose=False,
930
+ clear_cache=False,
931
+ unet_additional_kwargs={},
932
+ guidance_callback=None,
933
+ **kwargs,
934
+ ):
935
+ scheduler, unet = self.scheduler, self.unet
936
+
937
+ iteration = 0
938
+
939
+ if index < guidance_timesteps:
940
+ if isinstance(max_iter, list):
941
+ max_iter = max_iter[index]
942
+
943
+ if verbose:
944
+ logger.info(
945
+ f"time index {index}, loss: {loss.item()/loss_scale:.3f} (de-scaled with scale {loss_scale:.1f}), loss threshold: {loss_threshold:.3f}"
946
+ )
947
+
948
+ try:
949
+ self.enable_attn_hook(enabled=True)
950
+
951
+ while (
952
+ loss.item() / loss_scale > loss_threshold and iteration < max_iter and index < guidance_timesteps
953
+ ):
954
+ self._saved_attn = {}
955
+
956
+ latents.requires_grad_(True)
957
+ latent_model_input = latents
958
+ latent_model_input = scheduler.scale_model_input(latent_model_input, t)
959
+
960
+ unet(
961
+ latent_model_input,
962
+ t,
963
+ encoder_hidden_states=cond_embeddings,
964
+ cross_attention_kwargs=cross_attention_kwargs,
965
+ **unet_additional_kwargs,
966
+ )
967
+
968
+ # update latents with guidance
969
+ loss = (
970
+ self.compute_ca_loss(
971
+ saved_attn=self._saved_attn,
972
+ bboxes=boxes,
973
+ phrase_indices=phrase_indices,
974
+ guidance_attn_keys=guidance_attn_keys,
975
+ verbose=verbose,
976
+ **kwargs,
977
+ )
978
+ * loss_scale
979
+ )
980
+
981
+ if torch.isnan(loss):
982
+ raise RuntimeError("**Loss is NaN**")
983
+
984
+ # This callback allows visualizations.
985
+ if guidance_callback is not None:
986
+ guidance_callback(self, latents, loss, iteration, index)
987
+
988
+ self._saved_attn = None
989
+
990
+ grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents])[0]
991
+
992
+ latents.requires_grad_(False)
993
+
994
+ # Scaling with classifier guidance
995
+ alpha_prod_t = scheduler.alphas_cumprod[t]
996
+ # Classifier guidance: https://arxiv.org/pdf/2105.05233.pdf
997
+ # DDIM: https://arxiv.org/pdf/2010.02502.pdf
998
+ scale = (1 - alpha_prod_t) ** (0.5)
999
+ latents = latents - scale * grad_cond
1000
+
1001
+ iteration += 1
1002
+
1003
+ if clear_cache:
1004
+ gc.collect()
1005
+ torch.cuda.empty_cache()
1006
+
1007
+ if verbose:
1008
+ logger.info(
1009
+ f"time index {index}, loss: {loss.item()/loss_scale:.3f}, loss threshold: {loss_threshold:.3f}, iteration: {iteration}"
1010
+ )
1011
+
1012
+ finally:
1013
+ self.enable_attn_hook(enabled=False)
1014
+
1015
+ return latents, loss
v0.24.0/lpw_stable_diffusion.py ADDED
@@ -0,0 +1,1471 @@
1
+ import inspect
2
+ import re
3
+ from typing import Any, Callable, Dict, List, Optional, Union
4
+
5
+ import numpy as np
6
+ import PIL.Image
7
+ import torch
8
+ from packaging import version
9
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
10
+
11
+ from diffusers import DiffusionPipeline
12
+ from diffusers.configuration_utils import FrozenDict
13
+ from diffusers.image_processor import VaeImageProcessor
14
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
15
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
16
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
17
+ from diffusers.schedulers import KarrasDiffusionSchedulers
18
+ from diffusers.utils import (
19
+ PIL_INTERPOLATION,
20
+ deprecate,
21
+ is_accelerate_available,
22
+ is_accelerate_version,
23
+ logging,
24
+ )
25
+ from diffusers.utils.torch_utils import randn_tensor
26
+
27
+
28
+ # ------------------------------------------------------------------------------
29
+
30
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
31
+
32
+ re_attention = re.compile(
33
+ r"""
34
+ \\\(|
35
+ \\\)|
36
+ \\\[|
37
+ \\]|
38
+ \\\\|
39
+ \\|
40
+ \(|
41
+ \[|
42
+ :([+-]?[.\d]+)\)|
43
+ \)|
44
+ ]|
45
+ [^\\()\[\]:]+|
46
+ :
47
+ """,
48
+ re.X,
49
+ )
50
+
51
+
52
+ def parse_prompt_attention(text):
53
+ """
54
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
55
+ Accepted tokens are:
56
+ (abc) - increases attention to abc by a multiplier of 1.1
57
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
58
+ [abc] - decreases attention to abc by a multiplier of 1.1
59
+ \\( - literal character '('
60
+ \\[ - literal character '['
61
+ \\) - literal character ')'
62
+ \\] - literal character ']'
63
+ \\ - literal character '\'
64
+ anything else - just text
65
+ >>> parse_prompt_attention('normal text')
66
+ [['normal text', 1.0]]
67
+ >>> parse_prompt_attention('an (important) word')
68
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
69
+ >>> parse_prompt_attention('(unbalanced')
70
+ [['unbalanced', 1.1]]
71
+ >>> parse_prompt_attention('\\(literal\\]')
72
+ [['(literal]', 1.0]]
73
+ >>> parse_prompt_attention('(unnecessary)(parens)')
74
+ [['unnecessaryparens', 1.1]]
75
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
76
+ [['a ', 1.0],
77
+ ['house', 1.5730000000000004],
78
+ [' ', 1.1],
79
+ ['on', 1.0],
80
+ [' a ', 1.1],
81
+ ['hill', 0.55],
82
+ [', sun, ', 1.1],
83
+ ['sky', 1.4641000000000006],
84
+ ['.', 1.1]]
85
+ """
86
+
87
+ res = []
88
+ round_brackets = []
89
+ square_brackets = []
90
+
91
+ round_bracket_multiplier = 1.1
92
+ square_bracket_multiplier = 1 / 1.1
93
+
94
+ def multiply_range(start_position, multiplier):
95
+ for p in range(start_position, len(res)):
96
+ res[p][1] *= multiplier
97
+
98
+ for m in re_attention.finditer(text):
99
+ text = m.group(0)
100
+ weight = m.group(1)
101
+
102
+ if text.startswith("\\"):
103
+ res.append([text[1:], 1.0])
104
+ elif text == "(":
105
+ round_brackets.append(len(res))
106
+ elif text == "[":
107
+ square_brackets.append(len(res))
108
+ elif weight is not None and len(round_brackets) > 0:
109
+ multiply_range(round_brackets.pop(), float(weight))
110
+ elif text == ")" and len(round_brackets) > 0:
111
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
112
+ elif text == "]" and len(square_brackets) > 0:
113
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
114
+ else:
115
+ res.append([text, 1.0])
116
+
117
+ for pos in round_brackets:
118
+ multiply_range(pos, round_bracket_multiplier)
119
+
120
+ for pos in square_brackets:
121
+ multiply_range(pos, square_bracket_multiplier)
122
+
123
+ if len(res) == 0:
124
+ res = [["", 1.0]]
125
+
126
+ # merge runs of identical weights
127
+ i = 0
128
+ while i + 1 < len(res):
129
+ if res[i][1] == res[i + 1][1]:
130
+ res[i][0] += res[i + 1][0]
131
+ res.pop(i + 1)
132
+ else:
133
+ i += 1
134
+
135
+ return res
136
+
137
+
138
+ def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
139
+ r"""
140
+ Tokenize a list of prompts and return their tokens together with the weight of each token.
141
+
142
+ No padding, starting or ending token is included.
143
+ """
144
+ tokens = []
145
+ weights = []
146
+ truncated = False
147
+ for text in prompt:
148
+ texts_and_weights = parse_prompt_attention(text)
149
+ text_token = []
150
+ text_weight = []
151
+ for word, weight in texts_and_weights:
152
+ # tokenize and discard the starting and the ending token
153
+ token = pipe.tokenizer(word).input_ids[1:-1]
154
+ text_token += token
155
+ # copy the weight by length of token
156
+ text_weight += [weight] * len(token)
157
+ # stop if the text is too long (longer than truncation limit)
158
+ if len(text_token) > max_length:
159
+ truncated = True
160
+ break
161
+ # truncate
162
+ if len(text_token) > max_length:
163
+ truncated = True
164
+ text_token = text_token[:max_length]
165
+ text_weight = text_weight[:max_length]
166
+ tokens.append(text_token)
167
+ weights.append(text_weight)
168
+ if truncated:
169
+ logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
170
+ return tokens, weights
171
+
172
+
173
+ def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
174
+ r"""
175
+ Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
176
+ """
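+ # Worked example of the padding below: with max_length=227, a 100-token prompt becomes
+ # [bos] + 100 tokens + 125 pad tokens + [eos] = 227 entries, and its weight list is padded with 1.0 accordingly.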
177
+ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
178
+ weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
179
+ for i in range(len(tokens)):
180
+ tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
181
+ if no_boseos_middle:
182
+ weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
183
+ else:
184
+ w = []
185
+ if len(weights[i]) == 0:
186
+ w = [1.0] * weights_length
187
+ else:
188
+ for j in range(max_embeddings_multiples):
189
+ w.append(1.0) # weight for starting token in this chunk
190
+ w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
191
+ w.append(1.0) # weight for ending token in this chunk
192
+ w += [1.0] * (weights_length - len(w))
193
+ weights[i] = w[:]
194
+
195
+ return tokens, weights
196
+
197
+
198
+ def get_unweighted_text_embeddings(
199
+ pipe: DiffusionPipeline,
200
+ text_input: torch.Tensor,
201
+ chunk_length: int,
202
+ no_boseos_middle: Optional[bool] = True,
203
+ ):
204
+ """
205
+ When the token sequence is longer than the capacity of the text encoder, it is split into
206
+ chunks which are sent to the text encoder individually and then concatenated.
207
+ """
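+ # Worked example of the chunking below (assuming the usual CLIP window, chunk_length=77):
+ # a padded input of 2 + 3 * 75 = 227 tokens gives max_embeddings_multiples = 3, so three
+ # windows of 77 tokens are encoded; with no_boseos_middle the duplicated BOS/EOS positions
+ # are dropped again, leaving 76 + 75 + 76 = 227 embedding vectors.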
208
+ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
209
+ if max_embeddings_multiples > 1:
210
+ text_embeddings = []
211
+ for i in range(max_embeddings_multiples):
212
+ # extract the i-th chunk
213
+ text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
214
+
215
+ # cover the head and the tail by the starting and the ending tokens
216
+ text_input_chunk[:, 0] = text_input[0, 0]
217
+ text_input_chunk[:, -1] = text_input[0, -1]
218
+ text_embedding = pipe.text_encoder(text_input_chunk)[0]
219
+
220
+ if no_boseos_middle:
221
+ if i == 0:
222
+ # discard the ending token
223
+ text_embedding = text_embedding[:, :-1]
224
+ elif i == max_embeddings_multiples - 1:
225
+ # discard the starting token
226
+ text_embedding = text_embedding[:, 1:]
227
+ else:
228
+ # discard both starting and ending tokens
229
+ text_embedding = text_embedding[:, 1:-1]
230
+
231
+ text_embeddings.append(text_embedding)
232
+ text_embeddings = torch.concat(text_embeddings, axis=1)
233
+ else:
234
+ text_embeddings = pipe.text_encoder(text_input)[0]
235
+ return text_embeddings
236
+
237
+
238
+ def get_weighted_text_embeddings(
239
+ pipe: DiffusionPipeline,
240
+ prompt: Union[str, List[str]],
241
+ uncond_prompt: Optional[Union[str, List[str]]] = None,
242
+ max_embeddings_multiples: Optional[int] = 3,
243
+ no_boseos_middle: Optional[bool] = False,
244
+ skip_parsing: Optional[bool] = False,
245
+ skip_weighting: Optional[bool] = False,
246
+ ):
247
+ r"""
248
+ Prompts can be assigned local weights using brackets. For example,
249
+ prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
250
+ and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
251
+
252
+ Also, to regularize the embedding, the weighted embedding is scaled so that the original mean is preserved.
253
+
254
+ Args:
255
+ pipe (`DiffusionPipeline`):
256
+ Pipe to provide access to the tokenizer and the text encoder.
257
+ prompt (`str` or `List[str]`):
258
+ The prompt or prompts to guide the image generation.
259
+ uncond_prompt (`str` or `List[str]`):
260
+ The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
261
+ is provided, the embeddings of prompt and uncond_prompt are concatenated.
262
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
263
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
264
+ no_boseos_middle (`bool`, *optional*, defaults to `False`):
265
+ When the token sequence spans multiple text encoder windows, whether to drop the starting and
266
+ ending tokens of the intermediate chunks instead of keeping them.
267
+ skip_parsing (`bool`, *optional*, defaults to `False`):
268
+ Skip the parsing of brackets.
269
+ skip_weighting (`bool`, *optional*, defaults to `False`):
270
+ Skip the weighting. When parsing is skipped, this is forced to True.
271
+ """
272
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
273
+ if isinstance(prompt, str):
274
+ prompt = [prompt]
275
+
276
+ if not skip_parsing:
277
+ prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
278
+ if uncond_prompt is not None:
279
+ if isinstance(uncond_prompt, str):
280
+ uncond_prompt = [uncond_prompt]
281
+ uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
282
+ else:
283
+ prompt_tokens = [
284
+ token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
285
+ ]
286
+ prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
287
+ if uncond_prompt is not None:
288
+ if isinstance(uncond_prompt, str):
289
+ uncond_prompt = [uncond_prompt]
290
+ uncond_tokens = [
291
+ token[1:-1]
292
+ for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
293
+ ]
294
+ uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
295
+
296
+ # round up the longest length of tokens to a multiple of (model_max_length - 2)
297
+ max_length = max([len(token) for token in prompt_tokens])
298
+ if uncond_prompt is not None:
299
+ max_length = max(max_length, max([len(token) for token in uncond_tokens]))
300
+
301
+ max_embeddings_multiples = min(
302
+ max_embeddings_multiples,
303
+ (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
304
+ )
305
+ max_embeddings_multiples = max(1, max_embeddings_multiples)
306
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
307
+
308
+ # pad the length of tokens and weights
309
+ bos = pipe.tokenizer.bos_token_id
310
+ eos = pipe.tokenizer.eos_token_id
311
+ pad = getattr(pipe.tokenizer, "pad_token_id", eos)
312
+ prompt_tokens, prompt_weights = pad_tokens_and_weights(
313
+ prompt_tokens,
314
+ prompt_weights,
315
+ max_length,
316
+ bos,
317
+ eos,
318
+ pad,
319
+ no_boseos_middle=no_boseos_middle,
320
+ chunk_length=pipe.tokenizer.model_max_length,
321
+ )
322
+ prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
323
+ if uncond_prompt is not None:
324
+ uncond_tokens, uncond_weights = pad_tokens_and_weights(
325
+ uncond_tokens,
326
+ uncond_weights,
327
+ max_length,
328
+ bos,
329
+ eos,
330
+ pad,
331
+ no_boseos_middle=no_boseos_middle,
332
+ chunk_length=pipe.tokenizer.model_max_length,
333
+ )
334
+ uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
335
+
336
+ # get the embeddings
337
+ text_embeddings = get_unweighted_text_embeddings(
338
+ pipe,
339
+ prompt_tokens,
340
+ pipe.tokenizer.model_max_length,
341
+ no_boseos_middle=no_boseos_middle,
342
+ )
343
+ prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
344
+ if uncond_prompt is not None:
345
+ uncond_embeddings = get_unweighted_text_embeddings(
346
+ pipe,
347
+ uncond_tokens,
348
+ pipe.tokenizer.model_max_length,
349
+ no_boseos_middle=no_boseos_middle,
350
+ )
351
+ uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
352
+
353
+ # assign weights to the prompts and normalize in the sense of mean
354
+ # TODO: should we normalize by chunk or in a whole (current implementation)?
355
+ if (not skip_parsing) and (not skip_weighting):
356
+ previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
357
+ text_embeddings *= prompt_weights.unsqueeze(-1)
358
+ current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
359
+ text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
360
+ if uncond_prompt is not None:
361
+ previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
362
+ uncond_embeddings *= uncond_weights.unsqueeze(-1)
363
+ current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
364
+ uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
365
+
366
+ if uncond_prompt is not None:
367
+ return text_embeddings, uncond_embeddings
368
+ return text_embeddings, None
369
+
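+
+ # A minimal sketch of calling the helper above directly (the pipeline normally does this inside
+ # `_encode_prompt`); `pipe` is assumed to be an already-loaded StableDiffusionLongPromptWeightingPipeline:
+ #
+ #     cond_embeds, uncond_embeds = get_weighted_text_embeddings(
+ #         pipe=pipe,
+ #         prompt="a (beautiful:1.2) landscape, ((masterpiece))",
+ #         uncond_prompt="lowres, blurry",
+ #         max_embeddings_multiples=3,
+ #     )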
370
+
371
+ def preprocess_image(image, batch_size):
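+ # e.g. a 514x768 PIL image is resized to 512x768 (multiples of 8) and returned as a
+ # (batch_size, 3, H, W) float tensor scaled to [-1, 1].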
372
+ w, h = image.size
373
+ w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
374
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
375
+ image = np.array(image).astype(np.float32) / 255.0
376
+ image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
377
+ image = torch.from_numpy(image)
378
+ return 2.0 * image - 1.0
379
+
380
+
381
+ def preprocess_mask(mask, batch_size, scale_factor=8):
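+ # e.g. a 512x768 (width x height) PIL mask ends up as a (batch_size, 4, 96, 64) tensor on the
+ # H/8 x W/8 latent grid, inverted so that white regions are repainted and black regions are kept.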
382
+ if not isinstance(mask, torch.FloatTensor):
383
+ mask = mask.convert("L")
384
+ w, h = mask.size
385
+ w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
386
+ mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
387
+ mask = np.array(mask).astype(np.float32) / 255.0
388
+ mask = np.tile(mask, (4, 1, 1))
389
+ mask = np.vstack([mask[None]] * batch_size)
390
+ mask = 1 - mask # repaint white, keep black
391
+ mask = torch.from_numpy(mask)
392
+ return mask
393
+
394
+ else:
395
+ valid_mask_channel_sizes = [1, 3]
396
+ # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W)
397
+ if mask.shape[3] in valid_mask_channel_sizes:
398
+ mask = mask.permute(0, 3, 1, 2)
399
+ elif mask.shape[1] not in valid_mask_channel_sizes:
400
+ raise ValueError(
401
+ f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
402
+ f" but received mask of shape {tuple(mask.shape)}"
403
+ )
404
+ # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
405
+ mask = mask.mean(dim=1, keepdim=True)
406
+ h, w = mask.shape[-2:]
407
+ h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8
408
+ mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
409
+ return mask
410
+
411
+
412
+ class StableDiffusionLongPromptWeightingPipeline(
413
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
414
+ ):
415
+ r"""
416
+ Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for
417
+ parsing attention weighting in the prompt.
418
+
419
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
420
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
421
+
422
+ Args:
423
+ vae ([`AutoencoderKL`]):
424
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
425
+ text_encoder ([`CLIPTextModel`]):
426
+ Frozen text-encoder. Stable Diffusion uses the text portion of
427
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
428
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
429
+ tokenizer (`CLIPTokenizer`):
430
+ Tokenizer of class
431
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
432
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
433
+ scheduler ([`SchedulerMixin`]):
434
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
435
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
436
+ safety_checker ([`StableDiffusionSafetyChecker`]):
437
+ Classification module that estimates whether generated images could be considered offensive or harmful.
438
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
439
+ feature_extractor ([`CLIPImageProcessor`]):
440
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
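+
+ Examples:
+ A minimal usage sketch (the checkpoint id below is only illustrative; any Stable Diffusion v1.x
+ checkpoint should work):
+
+ ```py
+ import torch
+ from diffusers import DiffusionPipeline
+
+ pipe = DiffusionPipeline.from_pretrained(
+ "runwayml/stable-diffusion-v1-5",
+ custom_pipeline="lpw_stable_diffusion",
+ torch_dtype=torch.float16,
+ ).to("cuda")
+ image = pipe.text2img(
+ "a (beautiful:1.2) photo of a cat, ((masterpiece))",
+ negative_prompt="lowres, blurry",
+ max_embeddings_multiples=3,
+ ).images[0]
+ ```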
441
+ """
442
+
443
+ _optional_components = ["safety_checker", "feature_extractor"]
444
+
445
+ def __init__(
446
+ self,
447
+ vae: AutoencoderKL,
448
+ text_encoder: CLIPTextModel,
449
+ tokenizer: CLIPTokenizer,
450
+ unet: UNet2DConditionModel,
451
+ scheduler: KarrasDiffusionSchedulers,
452
+ safety_checker: StableDiffusionSafetyChecker,
453
+ feature_extractor: CLIPImageProcessor,
454
+ requires_safety_checker: bool = True,
455
+ ):
456
+ super().__init__()
457
+
458
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
459
+ deprecation_message = (
460
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
461
+ "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
462
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
463
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
464
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
465
+ " file"
466
+ )
467
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
468
+ new_config = dict(scheduler.config)
469
+ new_config["steps_offset"] = 1
470
+ scheduler._internal_dict = FrozenDict(new_config)
471
+
472
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
473
+ deprecation_message = (
474
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
475
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
476
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
477
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
478
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
479
+ )
480
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
481
+ new_config = dict(scheduler.config)
482
+ new_config["clip_sample"] = False
483
+ scheduler._internal_dict = FrozenDict(new_config)
484
+
485
+ if safety_checker is None and requires_safety_checker:
486
+ logger.warning(
487
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
488
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
489
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
490
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
491
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
492
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
493
+ )
494
+
495
+ if safety_checker is not None and feature_extractor is None:
496
+ raise ValueError(
497
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
498
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
499
+ )
500
+
501
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
502
+ version.parse(unet.config._diffusers_version).base_version
503
+ ) < version.parse("0.9.0.dev0")
504
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
505
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
506
+ deprecation_message = (
507
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
508
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
509
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
510
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
511
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
512
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
513
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
514
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
515
+ " the `unet/config.json` file"
516
+ )
517
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
518
+ new_config = dict(unet.config)
519
+ new_config["sample_size"] = 64
520
+ unet._internal_dict = FrozenDict(new_config)
521
+ self.register_modules(
522
+ vae=vae,
523
+ text_encoder=text_encoder,
524
+ tokenizer=tokenizer,
525
+ unet=unet,
526
+ scheduler=scheduler,
527
+ safety_checker=safety_checker,
528
+ feature_extractor=feature_extractor,
529
+ )
530
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
531
+
532
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
533
+ self.register_to_config(
534
+ requires_safety_checker=requires_safety_checker,
535
+ )
536
+
537
+ def enable_vae_slicing(self):
538
+ r"""
539
+ Enable sliced VAE decoding.
540
+
541
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
542
+ steps. This is useful to save some memory and allow larger batch sizes.
543
+ """
544
+ self.vae.enable_slicing()
545
+
546
+ def disable_vae_slicing(self):
547
+ r"""
548
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
549
+ computing decoding in one step.
550
+ """
551
+ self.vae.disable_slicing()
552
+
553
+ def enable_vae_tiling(self):
554
+ r"""
555
+ Enable tiled VAE decoding.
556
+
557
+ When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
558
+ several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
559
+ """
560
+ self.vae.enable_tiling()
561
+
562
+ def disable_vae_tiling(self):
563
+ r"""
564
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
565
+ computing decoding in one step.
566
+ """
567
+ self.vae.disable_tiling()
568
+
569
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
570
+ def enable_sequential_cpu_offload(self, gpu_id=0):
571
+ r"""
572
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
573
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
574
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
575
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
576
+ `enable_model_cpu_offload`, but performance is lower.
577
+ """
578
+ if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
579
+ from accelerate import cpu_offload
580
+ else:
581
+ raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
582
+
583
+ device = torch.device(f"cuda:{gpu_id}")
584
+
585
+ if self.device.type != "cpu":
586
+ self.to("cpu", silence_dtype_warnings=True)
587
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
588
+
589
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
590
+ cpu_offload(cpu_offloaded_model, device)
591
+
592
+ if self.safety_checker is not None:
593
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
594
+
595
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
596
+ def enable_model_cpu_offload(self, gpu_id=0):
597
+ r"""
598
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
599
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
600
+ method is called, and the model remains on the GPU until the next model runs. Memory savings are lower than with
601
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
602
+ """
603
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
604
+ from accelerate import cpu_offload_with_hook
605
+ else:
606
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
607
+
608
+ device = torch.device(f"cuda:{gpu_id}")
609
+
610
+ if self.device.type != "cpu":
611
+ self.to("cpu", silence_dtype_warnings=True)
612
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
613
+
614
+ hook = None
615
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
616
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
617
+
618
+ if self.safety_checker is not None:
619
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
620
+
621
+ # We'll offload the last model manually.
622
+ self.final_offload_hook = hook
623
+
624
+ @property
625
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
626
+ def _execution_device(self):
627
+ r"""
628
+ Returns the device on which the pipeline's models will be executed. After calling
629
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
630
+ hooks.
631
+ """
632
+ if not hasattr(self.unet, "_hf_hook"):
633
+ return self.device
634
+ for module in self.unet.modules():
635
+ if (
636
+ hasattr(module, "_hf_hook")
637
+ and hasattr(module._hf_hook, "execution_device")
638
+ and module._hf_hook.execution_device is not None
639
+ ):
640
+ return torch.device(module._hf_hook.execution_device)
641
+ return self.device
642
+
643
+ def _encode_prompt(
644
+ self,
645
+ prompt,
646
+ device,
647
+ num_images_per_prompt,
648
+ do_classifier_free_guidance,
649
+ negative_prompt=None,
650
+ max_embeddings_multiples=3,
651
+ prompt_embeds: Optional[torch.FloatTensor] = None,
652
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
653
+ ):
654
+ r"""
655
+ Encodes the prompt into text encoder hidden states.
656
+
657
+ Args:
658
+ prompt (`str` or `List[str]`):
659
+ prompt to be encoded
660
+ device: (`torch.device`):
661
+ torch device
662
+ num_images_per_prompt (`int`):
663
+ number of images that should be generated per prompt
664
+ do_classifier_free_guidance (`bool`):
665
+ whether to use classifier free guidance or not
666
+ negative_prompt (`str` or `List[str]`):
667
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
668
+ if `guidance_scale` is less than `1`).
669
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
670
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
671
+ """
672
+ if prompt is not None and isinstance(prompt, str):
673
+ batch_size = 1
674
+ elif prompt is not None and isinstance(prompt, list):
675
+ batch_size = len(prompt)
676
+ else:
677
+ batch_size = prompt_embeds.shape[0]
678
+
679
+ if negative_prompt_embeds is None:
680
+ if negative_prompt is None:
681
+ negative_prompt = [""] * batch_size
682
+ elif isinstance(negative_prompt, str):
683
+ negative_prompt = [negative_prompt] * batch_size
684
+ if batch_size != len(negative_prompt):
685
+ raise ValueError(
686
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
687
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
688
+ " the batch size of `prompt`."
689
+ )
690
+ if prompt_embeds is None or negative_prompt_embeds is None:
691
+ if isinstance(self, TextualInversionLoaderMixin):
692
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
693
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
694
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)
695
+
696
+ prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
697
+ pipe=self,
698
+ prompt=prompt,
699
+ uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
700
+ max_embeddings_multiples=max_embeddings_multiples,
701
+ )
702
+ if prompt_embeds is None:
703
+ prompt_embeds = prompt_embeds1
704
+ if negative_prompt_embeds is None:
705
+ negative_prompt_embeds = negative_prompt_embeds1
706
+
707
+ bs_embed, seq_len, _ = prompt_embeds.shape
708
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
709
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
710
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
711
+
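+ # For classifier-free guidance the negative and positive embeddings are concatenated into one
+ # batch ([uncond, cond]); the UNet output is split back apart later with `noise_pred.chunk(2)`.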
712
+ if do_classifier_free_guidance:
713
+ bs_embed, seq_len, _ = negative_prompt_embeds.shape
714
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
715
+ negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
716
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
717
+
718
+ return prompt_embeds
719
+
720
+ def check_inputs(
721
+ self,
722
+ prompt,
723
+ height,
724
+ width,
725
+ strength,
726
+ callback_steps,
727
+ negative_prompt=None,
728
+ prompt_embeds=None,
729
+ negative_prompt_embeds=None,
730
+ ):
731
+ if height % 8 != 0 or width % 8 != 0:
732
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
733
+
734
+ if strength < 0 or strength > 1:
735
+ raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
736
+
737
+ if (callback_steps is None) or (
738
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
739
+ ):
740
+ raise ValueError(
741
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
742
+ f" {type(callback_steps)}."
743
+ )
744
+
745
+ if prompt is not None and prompt_embeds is not None:
746
+ raise ValueError(
747
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
748
+ " only forward one of the two."
749
+ )
750
+ elif prompt is None and prompt_embeds is None:
751
+ raise ValueError(
752
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
753
+ )
754
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
755
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
756
+
757
+ if negative_prompt is not None and negative_prompt_embeds is not None:
758
+ raise ValueError(
759
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
760
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
761
+ )
762
+
763
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
764
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
765
+ raise ValueError(
766
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
767
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
768
+ f" {negative_prompt_embeds.shape}."
769
+ )
770
+
771
+ def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
772
+ if is_text2img:
773
+ return self.scheduler.timesteps.to(device), num_inference_steps
774
+ else:
775
+ # get the original timestep using init_timestep
776
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
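+ # e.g. num_inference_steps=50 with strength=0.8 gives init_timestep=40 and t_start=10, so
+ # (with a first-order scheduler) the last 40 of the 50 scheduled timesteps are used.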
777
+
778
+ t_start = max(num_inference_steps - init_timestep, 0)
779
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
780
+
781
+ return timesteps, num_inference_steps - t_start
782
+
783
+ def run_safety_checker(self, image, device, dtype):
784
+ if self.safety_checker is not None:
785
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
786
+ image, has_nsfw_concept = self.safety_checker(
787
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
788
+ )
789
+ else:
790
+ has_nsfw_concept = None
791
+ return image, has_nsfw_concept
792
+
793
+ def decode_latents(self, latents):
794
+ latents = 1 / self.vae.config.scaling_factor * latents
795
+ image = self.vae.decode(latents).sample
796
+ image = (image / 2 + 0.5).clamp(0, 1)
797
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
798
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
799
+ return image
800
+
801
+ def prepare_extra_step_kwargs(self, generator, eta):
802
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
803
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
804
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
805
+ # and should be between [0, 1]
806
+
807
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
808
+ extra_step_kwargs = {}
809
+ if accepts_eta:
810
+ extra_step_kwargs["eta"] = eta
811
+
812
+ # check if the scheduler accepts generator
813
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
814
+ if accepts_generator:
815
+ extra_step_kwargs["generator"] = generator
816
+ return extra_step_kwargs
817
+
818
+ def prepare_latents(
819
+ self,
820
+ image,
821
+ timestep,
822
+ num_images_per_prompt,
823
+ batch_size,
824
+ num_channels_latents,
825
+ height,
826
+ width,
827
+ dtype,
828
+ device,
829
+ generator,
830
+ latents=None,
831
+ ):
832
+ if image is None:
833
+ batch_size = batch_size * num_images_per_prompt
834
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
835
+ if isinstance(generator, list) and len(generator) != batch_size:
836
+ raise ValueError(
837
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
838
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
839
+ )
840
+
841
+ if latents is None:
842
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
843
+ else:
844
+ latents = latents.to(device)
845
+
846
+ # scale the initial noise by the standard deviation required by the scheduler
847
+ latents = latents * self.scheduler.init_noise_sigma
848
+ return latents, None, None
849
+ else:
850
+ image = image.to(device=self.device, dtype=dtype)
851
+ init_latent_dist = self.vae.encode(image).latent_dist
852
+ init_latents = init_latent_dist.sample(generator=generator)
853
+ init_latents = self.vae.config.scaling_factor * init_latents
854
+
855
+ # Expand init_latents for batch_size and num_images_per_prompt
856
+ init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
857
+ init_latents_orig = init_latents
858
+
859
+ # add noise to latents using the timesteps
860
+ noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype)
861
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
862
+ latents = init_latents
863
+ return latents, init_latents_orig, noise
864
+
865
+ @torch.no_grad()
866
+ def __call__(
867
+ self,
868
+ prompt: Union[str, List[str]],
869
+ negative_prompt: Optional[Union[str, List[str]]] = None,
870
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
871
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
872
+ height: int = 512,
873
+ width: int = 512,
874
+ num_inference_steps: int = 50,
875
+ guidance_scale: float = 7.5,
876
+ strength: float = 0.8,
877
+ num_images_per_prompt: Optional[int] = 1,
878
+ add_predicted_noise: Optional[bool] = False,
879
+ eta: float = 0.0,
880
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
881
+ latents: Optional[torch.FloatTensor] = None,
882
+ prompt_embeds: Optional[torch.FloatTensor] = None,
883
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
884
+ max_embeddings_multiples: Optional[int] = 3,
885
+ output_type: Optional[str] = "pil",
886
+ return_dict: bool = True,
887
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
888
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
889
+ callback_steps: int = 1,
890
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
891
+ ):
892
+ r"""
893
+ Function invoked when calling the pipeline for generation.
894
+
895
+ Args:
896
+ prompt (`str` or `List[str]`):
897
+ The prompt or prompts to guide the image generation.
898
+ negative_prompt (`str` or `List[str]`, *optional*):
899
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
900
+ if `guidance_scale` is less than `1`).
901
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
902
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
903
+ process.
904
+ mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
905
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
906
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
907
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
908
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
909
+ height (`int`, *optional*, defaults to 512):
910
+ The height in pixels of the generated image.
911
+ width (`int`, *optional*, defaults to 512):
912
+ The width in pixels of the generated image.
913
+ num_inference_steps (`int`, *optional*, defaults to 50):
914
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
915
+ expense of slower inference.
916
+ guidance_scale (`float`, *optional*, defaults to 7.5):
917
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
918
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
919
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
920
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
921
+ usually at the expense of lower image quality.
922
+ strength (`float`, *optional*, defaults to 0.8):
923
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
924
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
925
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
926
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
927
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
928
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
929
+ The number of images to generate per prompt.
930
+ add_predicted_noise (`bool`, *optional*, defaults to False):
931
+ Use predicted noise instead of random noise when constructing noisy versions of the original image in
932
+ the reverse diffusion process
933
+ eta (`float`, *optional*, defaults to 0.0):
934
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
935
+ [`schedulers.DDIMScheduler`], will be ignored for others.
936
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
937
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
938
+ to make generation deterministic.
939
+ latents (`torch.FloatTensor`, *optional*):
940
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
941
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
942
+ tensor will be generated by sampling using the supplied random `generator`.
943
+ prompt_embeds (`torch.FloatTensor`, *optional*):
944
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
945
+ provided, text embeddings will be generated from `prompt` input argument.
946
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
947
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
948
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
949
+ argument.
950
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
951
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
952
+ output_type (`str`, *optional*, defaults to `"pil"`):
953
+ The output format of the generated image. Choose between
954
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
955
+ return_dict (`bool`, *optional*, defaults to `True`):
956
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
957
+ plain tuple.
958
+ callback (`Callable`, *optional*):
959
+ A function that will be called every `callback_steps` steps during inference. The function will be
960
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
961
+ is_cancelled_callback (`Callable`, *optional*):
962
+ A function that will be called every `callback_steps` steps during inference. If the function returns
963
+ `True`, the inference will be cancelled.
964
+ callback_steps (`int`, *optional*, defaults to 1):
965
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
966
+ called at every step.
967
+ cross_attention_kwargs (`dict`, *optional*):
968
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
969
+ `self.processor` in
970
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
971
+
972
+ Returns:
973
+ `None` if cancelled by `is_cancelled_callback`,
974
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
975
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
976
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
977
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
978
+ (nsfw) content, according to the `safety_checker`.
979
+ """
980
+ # 0. Default height and width to unet
981
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
982
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
983
+
984
+ # 1. Check inputs. Raise error if not correct
985
+ self.check_inputs(
986
+ prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
987
+ )
988
+
989
+ # 2. Define call parameters
990
+ if prompt is not None and isinstance(prompt, str):
991
+ batch_size = 1
992
+ elif prompt is not None and isinstance(prompt, list):
993
+ batch_size = len(prompt)
994
+ else:
995
+ batch_size = prompt_embeds.shape[0]
996
+
997
+ device = self._execution_device
998
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
999
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1000
+ # corresponds to doing no classifier free guidance.
1001
+ do_classifier_free_guidance = guidance_scale > 1.0
1002
+
1003
+ # 3. Encode input prompt
1004
+ prompt_embeds = self._encode_prompt(
1005
+ prompt,
1006
+ device,
1007
+ num_images_per_prompt,
1008
+ do_classifier_free_guidance,
1009
+ negative_prompt,
1010
+ max_embeddings_multiples,
1011
+ prompt_embeds=prompt_embeds,
1012
+ negative_prompt_embeds=negative_prompt_embeds,
1013
+ )
1014
+ dtype = prompt_embeds.dtype
1015
+
1016
+ # 4. Preprocess image and mask
1017
+ if isinstance(image, PIL.Image.Image):
1018
+ image = preprocess_image(image, batch_size)
1019
+ if image is not None:
1020
+ image = image.to(device=self.device, dtype=dtype)
1021
+ if isinstance(mask_image, PIL.Image.Image):
1022
+ mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor)
1023
+ if mask_image is not None:
1024
+ mask = mask_image.to(device=self.device, dtype=dtype)
1025
+ mask = torch.cat([mask] * num_images_per_prompt)
1026
+ else:
1027
+ mask = None
1028
+
1029
+ # 5. set timesteps
1030
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1031
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
1032
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1033
+
1034
+ # 6. Prepare latent variables
1035
+ latents, init_latents_orig, noise = self.prepare_latents(
1036
+ image,
1037
+ latent_timestep,
1038
+ num_images_per_prompt,
1039
+ batch_size,
1040
+ self.unet.config.in_channels,
1041
+ height,
1042
+ width,
1043
+ dtype,
1044
+ device,
1045
+ generator,
1046
+ latents,
1047
+ )
1048
+
1049
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1050
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1051
+
1052
+ # 8. Denoising loop
1053
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1054
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1055
+ for i, t in enumerate(timesteps):
1056
+ # expand the latents if we are doing classifier free guidance
1057
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1058
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1059
+
1060
+ # predict the noise residual
1061
+ noise_pred = self.unet(
1062
+ latent_model_input,
1063
+ t,
1064
+ encoder_hidden_states=prompt_embeds,
1065
+ cross_attention_kwargs=cross_attention_kwargs,
1066
+ ).sample
1067
+
1068
+ # perform guidance
1069
+ if do_classifier_free_guidance:
1070
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1071
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1072
+
1073
+ # compute the previous noisy sample x_t -> x_t-1
1074
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1075
+
1076
+ if mask is not None:
1077
+ # masking
1078
+ if add_predicted_noise:
1079
+ init_latents_proper = self.scheduler.add_noise(
1080
+ init_latents_orig, noise_pred_uncond, torch.tensor([t])
1081
+ )
1082
+ else:
1083
+ init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
1084
+ latents = (init_latents_proper * mask) + (latents * (1 - mask))
1085
+
1086
+ # call the callback, if provided
1087
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1088
+ progress_bar.update()
1089
+ if i % callback_steps == 0:
1090
+ if callback is not None:
1091
+ step_idx = i // getattr(self.scheduler, "order", 1)
1092
+ callback(step_idx, t, latents)
1093
+ if is_cancelled_callback is not None and is_cancelled_callback():
1094
+ return None
1095
+
1096
+ if output_type == "latent":
1097
+ image = latents
1098
+ has_nsfw_concept = None
1099
+ elif output_type == "pil":
1100
+ # 9. Post-processing
1101
+ image = self.decode_latents(latents)
1102
+
1103
+ # 10. Run safety checker
1104
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1105
+
1106
+ # 11. Convert to PIL
1107
+ image = self.numpy_to_pil(image)
1108
+ else:
1109
+ # 9. Post-processing
1110
+ image = self.decode_latents(latents)
1111
+
1112
+ # 10. Run safety checker
1113
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1114
+
1115
+ # Offload last model to CPU
1116
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1117
+ self.final_offload_hook.offload()
1118
+
1119
+ if not return_dict:
1120
+ return image, has_nsfw_concept
1121
+
1122
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
1123
+
1124
+ def text2img(
1125
+ self,
1126
+ prompt: Union[str, List[str]],
1127
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1128
+ height: int = 512,
1129
+ width: int = 512,
1130
+ num_inference_steps: int = 50,
1131
+ guidance_scale: float = 7.5,
1132
+ num_images_per_prompt: Optional[int] = 1,
1133
+ eta: float = 0.0,
1134
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1135
+ latents: Optional[torch.FloatTensor] = None,
1136
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1137
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1138
+ max_embeddings_multiples: Optional[int] = 3,
1139
+ output_type: Optional[str] = "pil",
1140
+ return_dict: bool = True,
1141
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1142
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
1143
+ callback_steps: int = 1,
1144
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1145
+ ):
1146
+ r"""
1147
+ Function for text-to-image generation.
1148
+ Args:
1149
+ prompt (`str` or `List[str]`):
1150
+ The prompt or prompts to guide the image generation.
1151
+ negative_prompt (`str` or `List[str]`, *optional*):
1152
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1153
+ if `guidance_scale` is less than `1`).
1154
+ height (`int`, *optional*, defaults to 512):
1155
+ The height in pixels of the generated image.
1156
+ width (`int`, *optional*, defaults to 512):
1157
+ The width in pixels of the generated image.
1158
+ num_inference_steps (`int`, *optional*, defaults to 50):
1159
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1160
+ expense of slower inference.
1161
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1162
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1163
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1164
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1165
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1166
+ usually at the expense of lower image quality.
1167
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1168
+ The number of images to generate per prompt.
1169
+ eta (`float`, *optional*, defaults to 0.0):
1170
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1171
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1172
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1173
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1174
+ to make generation deterministic.
1175
+ latents (`torch.FloatTensor`, *optional*):
1176
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1177
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1178
+ tensor will be generated by sampling using the supplied random `generator`.
1179
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1180
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1181
+ provided, text embeddings will be generated from `prompt` input argument.
1182
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1183
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1184
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1185
+ argument.
1186
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1187
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1188
+ output_type (`str`, *optional*, defaults to `"pil"`):
1189
+ The output format of the generated image. Choose between
1190
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1191
+ return_dict (`bool`, *optional*, defaults to `True`):
1192
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1193
+ plain tuple.
1194
+ callback (`Callable`, *optional*):
1195
+ A function that will be called every `callback_steps` steps during inference. The function will be
1196
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1197
+ is_cancelled_callback (`Callable`, *optional*):
1198
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1199
+ `True`, the inference will be cancelled.
1200
+ callback_steps (`int`, *optional*, defaults to 1):
1201
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1202
+ called at every step.
1203
+ cross_attention_kwargs (`dict`, *optional*):
1204
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1205
+ `self.processor` in
1206
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1207
+
1208
+ Returns:
1209
+ `None` if cancelled by `is_cancelled_callback`,
1210
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1211
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1212
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1213
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1214
+ (nsfw) content, according to the `safety_checker`.
1215
+ """
1216
+ return self.__call__(
1217
+ prompt=prompt,
1218
+ negative_prompt=negative_prompt,
1219
+ height=height,
1220
+ width=width,
1221
+ num_inference_steps=num_inference_steps,
1222
+ guidance_scale=guidance_scale,
1223
+ num_images_per_prompt=num_images_per_prompt,
1224
+ eta=eta,
1225
+ generator=generator,
1226
+ latents=latents,
1227
+ prompt_embeds=prompt_embeds,
1228
+ negative_prompt_embeds=negative_prompt_embeds,
1229
+ max_embeddings_multiples=max_embeddings_multiples,
1230
+ output_type=output_type,
1231
+ return_dict=return_dict,
1232
+ callback=callback,
1233
+ is_cancelled_callback=is_cancelled_callback,
1234
+ callback_steps=callback_steps,
1235
+ cross_attention_kwargs=cross_attention_kwargs,
1236
+ )
1237
+
1238
+ def img2img(
1239
+ self,
1240
+ image: Union[torch.FloatTensor, PIL.Image.Image],
1241
+ prompt: Union[str, List[str]],
1242
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1243
+ strength: float = 0.8,
1244
+ num_inference_steps: Optional[int] = 50,
1245
+ guidance_scale: Optional[float] = 7.5,
1246
+ num_images_per_prompt: Optional[int] = 1,
1247
+ eta: Optional[float] = 0.0,
1248
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1249
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1250
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1251
+ max_embeddings_multiples: Optional[int] = 3,
1252
+ output_type: Optional[str] = "pil",
1253
+ return_dict: bool = True,
1254
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1255
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
1256
+ callback_steps: int = 1,
1257
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1258
+ ):
1259
+ r"""
1260
+ Function for image-to-image generation.
1261
+ Args:
1262
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
1263
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
1264
+ process.
1265
+ prompt (`str` or `List[str]`):
1266
+ The prompt or prompts to guide the image generation.
1267
+ negative_prompt (`str` or `List[str]`, *optional*):
1268
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1269
+ if `guidance_scale` is less than `1`).
1270
+ strength (`float`, *optional*, defaults to 0.8):
1271
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
1272
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
1273
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
1274
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
1275
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
1276
+ num_inference_steps (`int`, *optional*, defaults to 50):
1277
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1278
+ expense of slower inference. This parameter will be modulated by `strength`.
1279
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1280
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1281
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1282
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1283
+ 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
1284
+ usually at the expense of lower image quality.
1285
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1286
+ The number of images to generate per prompt.
1287
+ eta (`float`, *optional*, defaults to 0.0):
1288
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1289
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1290
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1291
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1292
+ to make generation deterministic.
1293
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1294
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1295
+ provided, text embeddings will be generated from `prompt` input argument.
1296
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1297
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1298
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1299
+ argument.
1300
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1301
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1302
+ output_type (`str`, *optional*, defaults to `"pil"`):
1303
+ The output format of the generated image. Choose between
1304
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1305
+ return_dict (`bool`, *optional*, defaults to `True`):
1306
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1307
+ plain tuple.
1308
+ callback (`Callable`, *optional*):
1309
+ A function that will be called every `callback_steps` steps during inference. The function will be
1310
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1311
+ is_cancelled_callback (`Callable`, *optional*):
1312
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1313
+ `True`, the inference will be cancelled.
1314
+ callback_steps (`int`, *optional*, defaults to 1):
1315
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1316
+ called at every step.
1317
+ cross_attention_kwargs (`dict`, *optional*):
1318
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1319
+ `self.processor` in
1320
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1321
+
1322
+ Returns:
1323
+ `None` if cancelled by `is_cancelled_callback`,
1324
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1325
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1326
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1327
+ (nsfw) content, according to the `safety_checker`.
1328
+ """
1329
+ return self.__call__(
1330
+ prompt=prompt,
1331
+ negative_prompt=negative_prompt,
1332
+ image=image,
1333
+ num_inference_steps=num_inference_steps,
1334
+ guidance_scale=guidance_scale,
1335
+ strength=strength,
1336
+ num_images_per_prompt=num_images_per_prompt,
1337
+ eta=eta,
1338
+ generator=generator,
1339
+ prompt_embeds=prompt_embeds,
1340
+ negative_prompt_embeds=negative_prompt_embeds,
1341
+ max_embeddings_multiples=max_embeddings_multiples,
1342
+ output_type=output_type,
1343
+ return_dict=return_dict,
1344
+ callback=callback,
1345
+ is_cancelled_callback=is_cancelled_callback,
1346
+ callback_steps=callback_steps,
1347
+ cross_attention_kwargs=cross_attention_kwargs,
1348
+ )
1349
+
1350
+ def inpaint(
1351
+ self,
1352
+ image: Union[torch.FloatTensor, PIL.Image.Image],
1353
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image],
1354
+ prompt: Union[str, List[str]],
1355
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1356
+ strength: float = 0.8,
1357
+ num_inference_steps: Optional[int] = 50,
1358
+ guidance_scale: Optional[float] = 7.5,
1359
+ num_images_per_prompt: Optional[int] = 1,
1360
+ add_predicted_noise: Optional[bool] = False,
1361
+ eta: Optional[float] = 0.0,
1362
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1363
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1364
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1365
+ max_embeddings_multiples: Optional[int] = 3,
1366
+ output_type: Optional[str] = "pil",
1367
+ return_dict: bool = True,
1368
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1369
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
1370
+ callback_steps: int = 1,
1371
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1372
+ ):
1373
+ r"""
1374
+ Function for inpaint.
1375
+ Args:
1376
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
1377
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
1378
+ process. This is the image whose masked region will be inpainted.
1379
+ mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
1380
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1381
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
1382
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
1383
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
1384
+ prompt (`str` or `List[str]`):
1385
+ The prompt or prompts to guide the image generation.
1386
+ negative_prompt (`str` or `List[str]`, *optional*):
1387
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1388
+ if `guidance_scale` is less than `1`).
1389
+ strength (`float`, *optional*, defaults to 0.8):
1390
+ Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
1391
+ is 1, the denoising process will be run on the masked area for the full number of iterations specified
1392
+ in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
1393
+ noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
1394
+ num_inference_steps (`int`, *optional*, defaults to 50):
1395
+ The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
1396
+ the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
1397
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1398
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1399
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1400
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1401
+ 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
1402
+ usually at the expense of lower image quality.
1403
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1404
+ The number of images to generate per prompt.
1405
+ add_predicted_noise (`bool`, *optional*, defaults to False):
1406
+ Use predicted noise instead of random noise when constructing noisy versions of the original image in
1407
+ the reverse diffusion process
1408
+ eta (`float`, *optional*, defaults to 0.0):
1409
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1410
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1411
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1412
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1413
+ to make generation deterministic.
1414
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1415
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1416
+ provided, text embeddings will be generated from `prompt` input argument.
1417
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1418
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1419
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1420
+ argument.
1421
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1422
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1423
+ output_type (`str`, *optional*, defaults to `"pil"`):
1424
+ The output format of the generated image. Choose between
1425
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1426
+ return_dict (`bool`, *optional*, defaults to `True`):
1427
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1428
+ plain tuple.
1429
+ callback (`Callable`, *optional*):
1430
+ A function that will be called every `callback_steps` steps during inference. The function will be
1431
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1432
+ is_cancelled_callback (`Callable`, *optional*):
1433
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1434
+ `True`, the inference will be cancelled.
1435
+ callback_steps (`int`, *optional*, defaults to 1):
1436
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1437
+ called at every step.
1438
+ cross_attention_kwargs (`dict`, *optional*):
1439
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1440
+ `self.processor` in
1441
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1442
+
1443
+ Returns:
1444
+ `None` if cancelled by `is_cancelled_callback`,
1445
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1446
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1447
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1448
+ (nsfw) content, according to the `safety_checker`.
1449
+ """
1450
+ return self.__call__(
1451
+ prompt=prompt,
1452
+ negative_prompt=negative_prompt,
1453
+ image=image,
1454
+ mask_image=mask_image,
1455
+ num_inference_steps=num_inference_steps,
1456
+ guidance_scale=guidance_scale,
1457
+ strength=strength,
1458
+ num_images_per_prompt=num_images_per_prompt,
1459
+ add_predicted_noise=add_predicted_noise,
1460
+ eta=eta,
1461
+ generator=generator,
1462
+ prompt_embeds=prompt_embeds,
1463
+ negative_prompt_embeds=negative_prompt_embeds,
1464
+ max_embeddings_multiples=max_embeddings_multiples,
1465
+ output_type=output_type,
1466
+ return_dict=return_dict,
1467
+ callback=callback,
1468
+ is_cancelled_callback=is_cancelled_callback,
1469
+ callback_steps=callback_steps,
1470
+ cross_attention_kwargs=cross_attention_kwargs,
1471
+ )
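A minimal usage sketch of the three convenience entry points defined above (text2img, img2img, inpaint), assuming the pipeline is loaded through diffusers' community-pipeline mechanism with custom_pipeline="lpw_stable_diffusion". The checkpoint name, file paths, and prompts below are illustrative placeholders, not part of this diff.

import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Load the long-prompt-weighting community pipeline (model id is a placeholder).
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# text2img: weighted prompts, e.g. (word:1.3) raises attention, [word] lowers it,
# and prompts longer than 77 tokens are handled via max_embeddings_multiples.
image = pipe.text2img(
    "a (photorealistic:1.3) portrait of an astronaut, [blurry]",
    negative_prompt="low quality",
    max_embeddings_multiples=3,
).images[0]

# img2img: transform an existing image; higher strength means more change.
init_image = Image.open("init.png").convert("RGB").resize((512, 512))
image = pipe.img2img(
    prompt="the same scene as a watercolor painting",
    image=init_image,
    strength=0.6,
).images[0]

# inpaint: white mask pixels are repainted, black pixels are preserved.
mask = Image.open("mask.png").convert("L").resize((512, 512))
image = pipe.inpaint(
    prompt="a red sofa",
    image=init_image,
    mask_image=mask,
    strength=0.75,
).images[0]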
v0.24.0/lpw_stable_diffusion_onnx.py ADDED
@@ -0,0 +1,1148 @@
 
1
+ import inspect
2
+ import re
3
+ from typing import Callable, List, Optional, Union
4
+
5
+ import numpy as np
6
+ import PIL.Image
7
+ import torch
8
+ from packaging import version
9
+ from transformers import CLIPImageProcessor, CLIPTokenizer
10
+
11
+ import diffusers
12
+ from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
14
+ from diffusers.utils import logging
15
+
16
+
17
+ try:
18
+ from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE
19
+ except ImportError:
20
+ ORT_TO_NP_TYPE = {
21
+ "tensor(bool)": np.bool_,
22
+ "tensor(int8)": np.int8,
23
+ "tensor(uint8)": np.uint8,
24
+ "tensor(int16)": np.int16,
25
+ "tensor(uint16)": np.uint16,
26
+ "tensor(int32)": np.int32,
27
+ "tensor(uint32)": np.uint32,
28
+ "tensor(int64)": np.int64,
29
+ "tensor(uint64)": np.uint64,
30
+ "tensor(float16)": np.float16,
31
+ "tensor(float)": np.float32,
32
+ "tensor(double)": np.float64,
33
+ }
34
+
35
+ try:
36
+ from diffusers.utils import PIL_INTERPOLATION
37
+ except ImportError:
38
+ if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
39
+ PIL_INTERPOLATION = {
40
+ "linear": PIL.Image.Resampling.BILINEAR,
41
+ "bilinear": PIL.Image.Resampling.BILINEAR,
42
+ "bicubic": PIL.Image.Resampling.BICUBIC,
43
+ "lanczos": PIL.Image.Resampling.LANCZOS,
44
+ "nearest": PIL.Image.Resampling.NEAREST,
45
+ }
46
+ else:
47
+ PIL_INTERPOLATION = {
48
+ "linear": PIL.Image.LINEAR,
49
+ "bilinear": PIL.Image.BILINEAR,
50
+ "bicubic": PIL.Image.BICUBIC,
51
+ "lanczos": PIL.Image.LANCZOS,
52
+ "nearest": PIL.Image.NEAREST,
53
+ }
54
+ # ------------------------------------------------------------------------------
55
+
56
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
57
+
58
+ re_attention = re.compile(
59
+ r"""
60
+ \\\(|
61
+ \\\)|
62
+ \\\[|
63
+ \\]|
64
+ \\\\|
65
+ \\|
66
+ \(|
67
+ \[|
68
+ :([+-]?[.\d]+)\)|
69
+ \)|
70
+ ]|
71
+ [^\\()\[\]:]+|
72
+ :
73
+ """,
74
+ re.X,
75
+ )
76
+
77
+
78
+ def parse_prompt_attention(text):
79
+ """
80
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
81
+ Accepted tokens are:
82
+ (abc) - increases attention to abc by a multiplier of 1.1
83
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
84
+ [abc] - decreases attention to abc by a multiplier of 1.1
85
+ \\( - literal character '('
86
+ \\[ - literal character '['
87
+ \\) - literal character ')'
88
+ \\] - literal character ']'
89
+ \\ - literal character '\'
90
+ anything else - just text
91
+ >>> parse_prompt_attention('normal text')
92
+ [['normal text', 1.0]]
93
+ >>> parse_prompt_attention('an (important) word')
94
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
95
+ >>> parse_prompt_attention('(unbalanced')
96
+ [['unbalanced', 1.1]]
97
+ >>> parse_prompt_attention('\\(literal\\]')
98
+ [['(literal]', 1.0]]
99
+ >>> parse_prompt_attention('(unnecessary)(parens)')
100
+ [['unnecessaryparens', 1.1]]
101
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
102
+ [['a ', 1.0],
103
+ ['house', 1.5730000000000004],
104
+ [' ', 1.1],
105
+ ['on', 1.0],
106
+ [' a ', 1.1],
107
+ ['hill', 0.55],
108
+ [', sun, ', 1.1],
109
+ ['sky', 1.4641000000000006],
110
+ ['.', 1.1]]
111
+ """
112
+
113
+ res = []
114
+ round_brackets = []
115
+ square_brackets = []
116
+
117
+ round_bracket_multiplier = 1.1
118
+ square_bracket_multiplier = 1 / 1.1
119
+
120
+ def multiply_range(start_position, multiplier):
121
+ for p in range(start_position, len(res)):
122
+ res[p][1] *= multiplier
123
+
124
+ for m in re_attention.finditer(text):
125
+ text = m.group(0)
126
+ weight = m.group(1)
127
+
128
+ if text.startswith("\\"):
129
+ res.append([text[1:], 1.0])
130
+ elif text == "(":
131
+ round_brackets.append(len(res))
132
+ elif text == "[":
133
+ square_brackets.append(len(res))
134
+ elif weight is not None and len(round_brackets) > 0:
135
+ multiply_range(round_brackets.pop(), float(weight))
136
+ elif text == ")" and len(round_brackets) > 0:
137
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
138
+ elif text == "]" and len(square_brackets) > 0:
139
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
140
+ else:
141
+ res.append([text, 1.0])
142
+
143
+ for pos in round_brackets:
144
+ multiply_range(pos, round_bracket_multiplier)
145
+
146
+ for pos in square_brackets:
147
+ multiply_range(pos, square_bracket_multiplier)
148
+
149
+ if len(res) == 0:
150
+ res = [["", 1.0]]
151
+
152
+ # merge runs of identical weights
153
+ i = 0
154
+ while i + 1 < len(res):
155
+ if res[i][1] == res[i + 1][1]:
156
+ res[i][0] += res[i + 1][0]
157
+ res.pop(i + 1)
158
+ else:
159
+ i += 1
160
+
161
+ return res
162
+
163
+
164
+ def get_prompts_with_weights(pipe, prompt: List[str], max_length: int):
165
+ r"""
166
+ Tokenize a list of prompts and return its tokens with weights of each token.
167
+
168
+ No padding, starting or ending token is included.
169
+ """
170
+ tokens = []
171
+ weights = []
172
+ truncated = False
173
+ for text in prompt:
174
+ texts_and_weights = parse_prompt_attention(text)
175
+ text_token = []
176
+ text_weight = []
177
+ for word, weight in texts_and_weights:
178
+ # tokenize and discard the starting and the ending token
179
+ token = pipe.tokenizer(word, return_tensors="np").input_ids[0, 1:-1]
180
+ text_token += list(token)
181
+ # copy the weight by length of token
182
+ text_weight += [weight] * len(token)
183
+ # stop if the text is too long (longer than truncation limit)
184
+ if len(text_token) > max_length:
185
+ truncated = True
186
+ break
187
+ # truncate
188
+ if len(text_token) > max_length:
189
+ truncated = True
190
+ text_token = text_token[:max_length]
191
+ text_weight = text_weight[:max_length]
192
+ tokens.append(text_token)
193
+ weights.append(text_weight)
194
+ if truncated:
195
+ logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
196
+ return tokens, weights
197
+
198
+
199
+ def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
200
+ r"""
201
+ Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
202
+ """
203
+ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
204
+ weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
205
+ for i in range(len(tokens)):
206
+ tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
207
+ if no_boseos_middle:
208
+ weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
209
+ else:
210
+ w = []
211
+ if len(weights[i]) == 0:
212
+ w = [1.0] * weights_length
213
+ else:
214
+ for j in range(max_embeddings_multiples):
215
+ w.append(1.0) # weight for starting token in this chunk
216
+ w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
217
+ w.append(1.0) # weight for ending token in this chunk
218
+ w += [1.0] * (weights_length - len(w))
219
+ weights[i] = w[:]
220
+
221
+ return tokens, weights
222
+
223
+
224
+ def get_unweighted_text_embeddings(
225
+ pipe,
226
+ text_input: np.array,
227
+ chunk_length: int,
228
+ no_boseos_middle: Optional[bool] = True,
229
+ ):
230
+ """
231
+ When the length of tokens is a multiple of the capacity of the text encoder,
232
+ it should be split into chunks and sent to the text encoder individually.
233
+ """
234
+ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
235
+ if max_embeddings_multiples > 1:
236
+ text_embeddings = []
237
+ for i in range(max_embeddings_multiples):
238
+ # extract the i-th chunk
239
+ text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].copy()
240
+
241
+ # cover the head and the tail by the starting and the ending tokens
242
+ text_input_chunk[:, 0] = text_input[0, 0]
243
+ text_input_chunk[:, -1] = text_input[0, -1]
244
+
245
+ text_embedding = pipe.text_encoder(input_ids=text_input_chunk)[0]
246
+
247
+ if no_boseos_middle:
248
+ if i == 0:
249
+ # discard the ending token
250
+ text_embedding = text_embedding[:, :-1]
251
+ elif i == max_embeddings_multiples - 1:
252
+ # discard the starting token
253
+ text_embedding = text_embedding[:, 1:]
254
+ else:
255
+ # discard both starting and ending tokens
256
+ text_embedding = text_embedding[:, 1:-1]
257
+
258
+ text_embeddings.append(text_embedding)
259
+ text_embeddings = np.concatenate(text_embeddings, axis=1)
260
+ else:
261
+ text_embeddings = pipe.text_encoder(input_ids=text_input)[0]
262
+ return text_embeddings
263
+
264
+
265
+ def get_weighted_text_embeddings(
266
+ pipe,
267
+ prompt: Union[str, List[str]],
268
+ uncond_prompt: Optional[Union[str, List[str]]] = None,
269
+ max_embeddings_multiples: Optional[int] = 4,
270
+ no_boseos_middle: Optional[bool] = False,
271
+ skip_parsing: Optional[bool] = False,
272
+ skip_weighting: Optional[bool] = False,
273
+ **kwargs,
274
+ ):
275
+ r"""
276
+ Prompts can be assigned with local weights using brackets. For example,
277
+ prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
278
+ and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
279
+
280
+ Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean.
281
+
282
+ Args:
283
+ pipe (`OnnxStableDiffusionPipeline`):
284
+ Pipe to provide access to the tokenizer and the text encoder.
285
+ prompt (`str` or `List[str]`):
286
+ The prompt or prompts to guide the image generation.
287
+ uncond_prompt (`str` or `List[str]`):
288
+ The unconditional prompt or prompts for guide the image generation. If unconditional prompt
289
+ is provided, the embeddings of prompt and uncond_prompt are concatenated.
290
+ max_embeddings_multiples (`int`, *optional*, defaults to `1`):
291
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
292
+ no_boseos_middle (`bool`, *optional*, defaults to `False`):
293
+ If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and
294
+ ending token in each of the chunk in the middle.
295
+ skip_parsing (`bool`, *optional*, defaults to `False`):
296
+ Skip the parsing of brackets.
297
+ skip_weighting (`bool`, *optional*, defaults to `False`):
298
+ Skip the weighting. When the parsing is skipped, it is forced True.
299
+ """
300
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
301
+ if isinstance(prompt, str):
302
+ prompt = [prompt]
303
+
304
+ if not skip_parsing:
305
+ prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
306
+ if uncond_prompt is not None:
307
+ if isinstance(uncond_prompt, str):
308
+ uncond_prompt = [uncond_prompt]
309
+ uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
310
+ else:
311
+ prompt_tokens = [
312
+ token[1:-1]
313
+ for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True, return_tensors="np").input_ids
314
+ ]
315
+ prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
316
+ if uncond_prompt is not None:
317
+ if isinstance(uncond_prompt, str):
318
+ uncond_prompt = [uncond_prompt]
319
+ uncond_tokens = [
320
+ token[1:-1]
321
+ for token in pipe.tokenizer(
322
+ uncond_prompt,
323
+ max_length=max_length,
324
+ truncation=True,
325
+ return_tensors="np",
326
+ ).input_ids
327
+ ]
328
+ uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
329
+
330
+ # round up the longest length of tokens to a multiple of (model_max_length - 2)
331
+ max_length = max([len(token) for token in prompt_tokens])
332
+ if uncond_prompt is not None:
333
+ max_length = max(max_length, max([len(token) for token in uncond_tokens]))
334
+
335
+ max_embeddings_multiples = min(
336
+ max_embeddings_multiples,
337
+ (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
338
+ )
339
+ max_embeddings_multiples = max(1, max_embeddings_multiples)
340
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
341
+
342
+ # pad the length of tokens and weights
343
+ bos = pipe.tokenizer.bos_token_id
344
+ eos = pipe.tokenizer.eos_token_id
345
+ pad = getattr(pipe.tokenizer, "pad_token_id", eos)
346
+ prompt_tokens, prompt_weights = pad_tokens_and_weights(
347
+ prompt_tokens,
348
+ prompt_weights,
349
+ max_length,
350
+ bos,
351
+ eos,
352
+ pad,
353
+ no_boseos_middle=no_boseos_middle,
354
+ chunk_length=pipe.tokenizer.model_max_length,
355
+ )
356
+ prompt_tokens = np.array(prompt_tokens, dtype=np.int32)
357
+ if uncond_prompt is not None:
358
+ uncond_tokens, uncond_weights = pad_tokens_and_weights(
359
+ uncond_tokens,
360
+ uncond_weights,
361
+ max_length,
362
+ bos,
363
+ eos,
364
+ pad,
365
+ no_boseos_middle=no_boseos_middle,
366
+ chunk_length=pipe.tokenizer.model_max_length,
367
+ )
368
+ uncond_tokens = np.array(uncond_tokens, dtype=np.int32)
369
+
370
+ # get the embeddings
371
+ text_embeddings = get_unweighted_text_embeddings(
372
+ pipe,
373
+ prompt_tokens,
374
+ pipe.tokenizer.model_max_length,
375
+ no_boseos_middle=no_boseos_middle,
376
+ )
377
+ prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)
378
+ if uncond_prompt is not None:
379
+ uncond_embeddings = get_unweighted_text_embeddings(
380
+ pipe,
381
+ uncond_tokens,
382
+ pipe.tokenizer.model_max_length,
383
+ no_boseos_middle=no_boseos_middle,
384
+ )
385
+ uncond_weights = np.array(uncond_weights, dtype=uncond_embeddings.dtype)
386
+
387
+ # assign weights to the prompts and normalize in the sense of mean
388
+ # TODO: should we normalize by chunk or in a whole (current implementation)?
389
+ if (not skip_parsing) and (not skip_weighting):
390
+ previous_mean = text_embeddings.mean(axis=(-2, -1))
391
+ text_embeddings *= prompt_weights[:, :, None]
392
+ text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]
393
+ if uncond_prompt is not None:
394
+ previous_mean = uncond_embeddings.mean(axis=(-2, -1))
395
+ uncond_embeddings *= uncond_weights[:, :, None]
396
+ uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=(-2, -1)))[:, None, None]
397
+
398
+ # For classifier free guidance, we need to do two forward passes.
399
+ # Here we concatenate the unconditional and text embeddings into a single batch
400
+ # to avoid doing two forward passes
401
+ if uncond_prompt is not None:
402
+ return text_embeddings, uncond_embeddings
403
+
404
+ return text_embeddings
405
+
406
+
407
+ def preprocess_image(image):
408
+ w, h = image.size
409
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
410
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
411
+ image = np.array(image).astype(np.float32) / 255.0
412
+ image = image[None].transpose(0, 3, 1, 2)
413
+ return 2.0 * image - 1.0
414
+
415
+
416
+ def preprocess_mask(mask, scale_factor=8):
417
+ mask = mask.convert("L")
418
+ w, h = mask.size
419
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
420
+ mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
421
+ mask = np.array(mask).astype(np.float32) / 255.0
422
+ mask = np.tile(mask, (4, 1, 1))
423
+ mask = mask[None].transpose(0, 1, 2, 3) # what does this step do?
424
+ mask = 1 - mask # repaint white, keep black
425
+ return mask
426
+
427
+
428
+ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline):
429
+ r"""
430
+ Pipeline for text-to-image generation using Stable Diffusion without tokens length limit, and support parsing
431
+ weighting in prompt.
432
+
433
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
434
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
435
+ """
436
+
437
+ if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"):
438
+
439
+ def __init__(
440
+ self,
441
+ vae_encoder: OnnxRuntimeModel,
442
+ vae_decoder: OnnxRuntimeModel,
443
+ text_encoder: OnnxRuntimeModel,
444
+ tokenizer: CLIPTokenizer,
445
+ unet: OnnxRuntimeModel,
446
+ scheduler: SchedulerMixin,
447
+ safety_checker: OnnxRuntimeModel,
448
+ feature_extractor: CLIPImageProcessor,
449
+ requires_safety_checker: bool = True,
450
+ ):
451
+ super().__init__(
452
+ vae_encoder=vae_encoder,
453
+ vae_decoder=vae_decoder,
454
+ text_encoder=text_encoder,
455
+ tokenizer=tokenizer,
456
+ unet=unet,
457
+ scheduler=scheduler,
458
+ safety_checker=safety_checker,
459
+ feature_extractor=feature_extractor,
460
+ requires_safety_checker=requires_safety_checker,
461
+ )
462
+ self.__init__additional__()
463
+
464
+ else:
465
+
466
+ def __init__(
467
+ self,
468
+ vae_encoder: OnnxRuntimeModel,
469
+ vae_decoder: OnnxRuntimeModel,
470
+ text_encoder: OnnxRuntimeModel,
471
+ tokenizer: CLIPTokenizer,
472
+ unet: OnnxRuntimeModel,
473
+ scheduler: SchedulerMixin,
474
+ safety_checker: OnnxRuntimeModel,
475
+ feature_extractor: CLIPImageProcessor,
476
+ ):
477
+ super().__init__(
478
+ vae_encoder=vae_encoder,
479
+ vae_decoder=vae_decoder,
480
+ text_encoder=text_encoder,
481
+ tokenizer=tokenizer,
482
+ unet=unet,
483
+ scheduler=scheduler,
484
+ safety_checker=safety_checker,
485
+ feature_extractor=feature_extractor,
486
+ )
487
+ self.__init__additional__()
488
+
489
+ def __init__additional__(self):
490
+ self.unet.config.in_channels = 4
491
+ self.vae_scale_factor = 8
492
+
493
+ def _encode_prompt(
494
+ self,
495
+ prompt,
496
+ num_images_per_prompt,
497
+ do_classifier_free_guidance,
498
+ negative_prompt,
499
+ max_embeddings_multiples,
500
+ ):
501
+ r"""
502
+ Encodes the prompt into text encoder hidden states.
503
+
504
+ Args:
505
+ prompt (`str` or `list(int)`):
506
+ prompt to be encoded
507
+ num_images_per_prompt (`int`):
508
+ number of images that should be generated per prompt
509
+ do_classifier_free_guidance (`bool`):
510
+ whether to use classifier free guidance or not
511
+ negative_prompt (`str` or `List[str]`):
512
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
513
+ if `guidance_scale` is less than `1`).
514
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
515
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
516
+ """
517
+ batch_size = len(prompt) if isinstance(prompt, list) else 1
518
+
519
+ if negative_prompt is None:
520
+ negative_prompt = [""] * batch_size
521
+ elif isinstance(negative_prompt, str):
522
+ negative_prompt = [negative_prompt] * batch_size
523
+ if batch_size != len(negative_prompt):
524
+ raise ValueError(
525
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
526
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
527
+ " the batch size of `prompt`."
528
+ )
529
+
530
+ text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
531
+ pipe=self,
532
+ prompt=prompt,
533
+ uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
534
+ max_embeddings_multiples=max_embeddings_multiples,
535
+ )
536
+
537
+ text_embeddings = text_embeddings.repeat(num_images_per_prompt, 0)
538
+ if do_classifier_free_guidance:
539
+ uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 0)
540
+ text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
541
+
542
+ return text_embeddings
543
+
544
+ def check_inputs(self, prompt, height, width, strength, callback_steps):
545
+ if not isinstance(prompt, str) and not isinstance(prompt, list):
546
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
547
+
548
+ if strength < 0 or strength > 1:
549
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
550
+
551
+ if height % 8 != 0 or width % 8 != 0:
552
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
553
+
554
+ if (callback_steps is None) or (
555
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
556
+ ):
557
+ raise ValueError(
558
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
559
+ f" {type(callback_steps)}."
560
+ )
561
+
562
+ def get_timesteps(self, num_inference_steps, strength, is_text2img):
563
+ if is_text2img:
564
+ return self.scheduler.timesteps, num_inference_steps
565
+ else:
566
+ # get the original timestep using init_timestep
567
+ offset = self.scheduler.config.get("steps_offset", 0)
568
+ init_timestep = int(num_inference_steps * strength) + offset
569
+ init_timestep = min(init_timestep, num_inference_steps)
570
+
571
+ t_start = max(num_inference_steps - init_timestep + offset, 0)
572
+ timesteps = self.scheduler.timesteps[t_start:]
573
+ return timesteps, num_inference_steps - t_start
574
+
575
+ def run_safety_checker(self, image):
576
+ if self.safety_checker is not None:
577
+ safety_checker_input = self.feature_extractor(
578
+ self.numpy_to_pil(image), return_tensors="np"
579
+ ).pixel_values.astype(image.dtype)
580
+ # There will throw an error if use safety_checker directly and batchsize>1
581
+ images, has_nsfw_concept = [], []
582
+ for i in range(image.shape[0]):
583
+ image_i, has_nsfw_concept_i = self.safety_checker(
584
+ clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
585
+ )
586
+ images.append(image_i)
587
+ has_nsfw_concept.append(has_nsfw_concept_i[0])
588
+ image = np.concatenate(images)
589
+ else:
590
+ has_nsfw_concept = None
591
+ return image, has_nsfw_concept
592
+
593
+ def decode_latents(self, latents):
594
+ latents = 1 / 0.18215 * latents
595
+ # image = self.vae_decoder(latent_sample=latents)[0]
596
+ # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1
597
+ image = np.concatenate(
598
+ [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
599
+ )
600
+ image = np.clip(image / 2 + 0.5, 0, 1)
601
+ image = image.transpose((0, 2, 3, 1))
602
+ return image
603
+
604
+ def prepare_extra_step_kwargs(self, generator, eta):
605
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
606
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
607
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
608
+ # and should be between [0, 1]
609
+
610
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
611
+ extra_step_kwargs = {}
612
+ if accepts_eta:
613
+ extra_step_kwargs["eta"] = eta
614
+
615
+ # check if the scheduler accepts generator
616
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
617
+ if accepts_generator:
618
+ extra_step_kwargs["generator"] = generator
619
+ return extra_step_kwargs
620
+
621
+ def prepare_latents(self, image, timestep, batch_size, height, width, dtype, generator, latents=None):
622
+ if image is None:
623
+ shape = (
624
+ batch_size,
625
+ self.unet.config.in_channels,
626
+ height // self.vae_scale_factor,
627
+ width // self.vae_scale_factor,
628
+ )
629
+
630
+ if latents is None:
631
+ latents = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
632
+ else:
633
+ if latents.shape != shape:
634
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
635
+
636
+ # scale the initial noise by the standard deviation required by the scheduler
637
+ latents = (torch.from_numpy(latents) * self.scheduler.init_noise_sigma).numpy()
638
+ return latents, None, None
639
+ else:
640
+ init_latents = self.vae_encoder(sample=image)[0]
641
+ init_latents = 0.18215 * init_latents
642
+ init_latents = np.concatenate([init_latents] * batch_size, axis=0)
643
+ init_latents_orig = init_latents
644
+ shape = init_latents.shape
645
+
646
+ # add noise to latents using the timesteps
647
+ noise = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
648
+ latents = self.scheduler.add_noise(
649
+ torch.from_numpy(init_latents), torch.from_numpy(noise), timestep
650
+ ).numpy()
651
+ return latents, init_latents_orig, noise
652
+
653
+ @torch.no_grad()
654
+ def __call__(
655
+ self,
656
+ prompt: Union[str, List[str]],
657
+ negative_prompt: Optional[Union[str, List[str]]] = None,
658
+ image: Union[np.ndarray, PIL.Image.Image] = None,
659
+ mask_image: Union[np.ndarray, PIL.Image.Image] = None,
660
+ height: int = 512,
661
+ width: int = 512,
662
+ num_inference_steps: int = 50,
663
+ guidance_scale: float = 7.5,
664
+ strength: float = 0.8,
665
+ num_images_per_prompt: Optional[int] = 1,
666
+ eta: float = 0.0,
667
+ generator: Optional[torch.Generator] = None,
668
+ latents: Optional[np.ndarray] = None,
669
+ max_embeddings_multiples: Optional[int] = 3,
670
+ output_type: Optional[str] = "pil",
671
+ return_dict: bool = True,
672
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
673
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
674
+ callback_steps: int = 1,
675
+ **kwargs,
676
+ ):
677
+ r"""
678
+ Function invoked when calling the pipeline for generation.
679
+
680
+ Args:
681
+ prompt (`str` or `List[str]`):
682
+ The prompt or prompts to guide the image generation.
683
+ negative_prompt (`str` or `List[str]`, *optional*):
684
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
685
+ if `guidance_scale` is less than `1`).
686
+ image (`np.ndarray` or `PIL.Image.Image`):
687
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
688
+ process.
689
+ mask_image (`np.ndarray` or `PIL.Image.Image`):
690
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
691
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
692
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
693
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
694
+ height (`int`, *optional*, defaults to 512):
695
+ The height in pixels of the generated image.
696
+ width (`int`, *optional*, defaults to 512):
697
+ The width in pixels of the generated image.
698
+ num_inference_steps (`int`, *optional*, defaults to 50):
699
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
700
+ expense of slower inference.
701
+ guidance_scale (`float`, *optional*, defaults to 7.5):
702
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
703
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
704
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
705
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
706
+ usually at the expense of lower image quality.
707
+ strength (`float`, *optional*, defaults to 0.8):
708
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
709
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
710
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
711
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
712
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
713
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
714
+ The number of images to generate per prompt.
715
+ eta (`float`, *optional*, defaults to 0.0):
716
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
717
+ [`schedulers.DDIMScheduler`], will be ignored for others.
718
+ generator (`torch.Generator`, *optional*):
719
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
720
+ deterministic.
721
+ latents (`np.ndarray`, *optional*):
722
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
723
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
724
+ tensor will ge generated by sampling using the supplied random `generator`.
725
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
726
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
727
+ output_type (`str`, *optional*, defaults to `"pil"`):
728
+ The output format of the generate image. Choose between
729
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
730
+ return_dict (`bool`, *optional*, defaults to `True`):
731
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
732
+ plain tuple.
733
+ callback (`Callable`, *optional*):
734
+ A function that will be called every `callback_steps` steps during inference. The function will be
735
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
736
+ is_cancelled_callback (`Callable`, *optional*):
737
+ A function that will be called every `callback_steps` steps during inference. If the function returns
738
+ `True`, the inference will be cancelled.
739
+ callback_steps (`int`, *optional*, defaults to 1):
740
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
741
+ called at every step.
742
+
743
+ Returns:
744
+ `None` if cancelled by `is_cancelled_callback`,
745
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
746
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
747
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
748
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
749
+ (nsfw) content, according to the `safety_checker`.
750
+ """
751
+ # 0. Default height and width to unet
752
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
753
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
754
+
755
+ # 1. Check inputs. Raise error if not correct
756
+ self.check_inputs(prompt, height, width, strength, callback_steps)
757
+
758
+ # 2. Define call parameters
759
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
760
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
761
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
762
+ # corresponds to doing no classifier free guidance.
763
+ do_classifier_free_guidance = guidance_scale > 1.0
764
+
765
+ # 3. Encode input prompt
766
+ text_embeddings = self._encode_prompt(
767
+ prompt,
768
+ num_images_per_prompt,
769
+ do_classifier_free_guidance,
770
+ negative_prompt,
771
+ max_embeddings_multiples,
772
+ )
773
+ dtype = text_embeddings.dtype
774
+
775
+ # 4. Preprocess image and mask
776
+ if isinstance(image, PIL.Image.Image):
777
+ image = preprocess_image(image)
778
+ if image is not None:
779
+ image = image.astype(dtype)
780
+ if isinstance(mask_image, PIL.Image.Image):
781
+ mask_image = preprocess_mask(mask_image, self.vae_scale_factor)
782
+ if mask_image is not None:
783
+ mask = mask_image.astype(dtype)
784
+ mask = np.concatenate([mask] * batch_size * num_images_per_prompt)
785
+ else:
786
+ mask = None
787
+
788
+ # 5. set timesteps
789
+ self.scheduler.set_timesteps(num_inference_steps)
790
+ timestep_dtype = next(
791
+ (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
792
+ )
793
+ timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
794
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, image is None)
795
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
796
+
797
+ # 6. Prepare latent variables
798
+ latents, init_latents_orig, noise = self.prepare_latents(
799
+ image,
800
+ latent_timestep,
801
+ batch_size * num_images_per_prompt,
802
+ height,
803
+ width,
804
+ dtype,
805
+ generator,
806
+ latents,
807
+ )
808
+
809
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
810
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
811
+
812
+ # 8. Denoising loop
813
+ for i, t in enumerate(self.progress_bar(timesteps)):
814
+ # expand the latents if we are doing classifier free guidance
815
+ latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
816
+ latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
817
+ latent_model_input = latent_model_input.numpy()
818
+
819
+ # predict the noise residual
820
+ noise_pred = self.unet(
821
+ sample=latent_model_input,
822
+ timestep=np.array([t], dtype=timestep_dtype),
823
+ encoder_hidden_states=text_embeddings,
824
+ )
825
+ noise_pred = noise_pred[0]
826
+
827
+ # perform guidance
828
+ if do_classifier_free_guidance:
829
+ noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
830
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
831
+
832
+ # compute the previous noisy sample x_t -> x_t-1
833
+ scheduler_output = self.scheduler.step(
834
+ torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
835
+ )
836
+ latents = scheduler_output.prev_sample.numpy()
837
+
838
+ if mask is not None:
839
+ # masking
840
+ init_latents_proper = self.scheduler.add_noise(
841
+ torch.from_numpy(init_latents_orig),
842
+ torch.from_numpy(noise),
843
+ t,
844
+ ).numpy()
845
+ latents = (init_latents_proper * mask) + (latents * (1 - mask))
846
+
847
+ # call the callback, if provided
848
+ if i % callback_steps == 0:
849
+ if callback is not None:
850
+ step_idx = i // getattr(self.scheduler, "order", 1)
851
+ callback(step_idx, t, latents)
852
+ if is_cancelled_callback is not None and is_cancelled_callback():
853
+ return None
854
+
855
+ # 9. Post-processing
856
+ image = self.decode_latents(latents)
857
+
858
+ # 10. Run safety checker
859
+ image, has_nsfw_concept = self.run_safety_checker(image)
860
+
861
+ # 11. Convert to PIL
862
+ if output_type == "pil":
863
+ image = self.numpy_to_pil(image)
864
+
865
+ if not return_dict:
866
+ return image, has_nsfw_concept
867
+
868
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
869
+
870
+ def text2img(
871
+ self,
872
+ prompt: Union[str, List[str]],
873
+ negative_prompt: Optional[Union[str, List[str]]] = None,
874
+ height: int = 512,
875
+ width: int = 512,
876
+ num_inference_steps: int = 50,
877
+ guidance_scale: float = 7.5,
878
+ num_images_per_prompt: Optional[int] = 1,
879
+ eta: float = 0.0,
880
+ generator: Optional[torch.Generator] = None,
881
+ latents: Optional[np.ndarray] = None,
882
+ max_embeddings_multiples: Optional[int] = 3,
883
+ output_type: Optional[str] = "pil",
884
+ return_dict: bool = True,
885
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
886
+ callback_steps: int = 1,
887
+ **kwargs,
888
+ ):
889
+ r"""
890
+ Function for text-to-image generation.
891
+ Args:
892
+ prompt (`str` or `List[str]`):
893
+ The prompt or prompts to guide the image generation.
894
+ negative_prompt (`str` or `List[str]`, *optional*):
895
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
896
+ if `guidance_scale` is less than `1`).
897
+ height (`int`, *optional*, defaults to 512):
898
+ The height in pixels of the generated image.
899
+ width (`int`, *optional*, defaults to 512):
900
+ The width in pixels of the generated image.
901
+ num_inference_steps (`int`, *optional*, defaults to 50):
902
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
903
+ expense of slower inference.
904
+ guidance_scale (`float`, *optional*, defaults to 7.5):
905
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
906
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
907
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
908
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
909
+ usually at the expense of lower image quality.
910
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
911
+ The number of images to generate per prompt.
912
+ eta (`float`, *optional*, defaults to 0.0):
913
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
914
+ [`schedulers.DDIMScheduler`], will be ignored for others.
915
+ generator (`torch.Generator`, *optional*):
916
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
917
+ deterministic.
918
+ latents (`np.ndarray`, *optional*):
919
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
920
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
921
+ tensor will ge generated by sampling using the supplied random `generator`.
922
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
923
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
924
+ output_type (`str`, *optional*, defaults to `"pil"`):
925
+ The output format of the generate image. Choose between
926
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
927
+ return_dict (`bool`, *optional*, defaults to `True`):
928
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
929
+ plain tuple.
930
+ callback (`Callable`, *optional*):
931
+ A function that will be called every `callback_steps` steps during inference. The function will be
932
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
933
+ callback_steps (`int`, *optional*, defaults to 1):
934
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
935
+ called at every step.
936
+ Returns:
937
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
938
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
939
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
940
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
941
+ (nsfw) content, according to the `safety_checker`.
942
+ """
943
+ return self.__call__(
944
+ prompt=prompt,
945
+ negative_prompt=negative_prompt,
946
+ height=height,
947
+ width=width,
948
+ num_inference_steps=num_inference_steps,
949
+ guidance_scale=guidance_scale,
950
+ num_images_per_prompt=num_images_per_prompt,
951
+ eta=eta,
952
+ generator=generator,
953
+ latents=latents,
954
+ max_embeddings_multiples=max_embeddings_multiples,
955
+ output_type=output_type,
956
+ return_dict=return_dict,
957
+ callback=callback,
958
+ callback_steps=callback_steps,
959
+ **kwargs,
960
+ )
961
+
962
+ def img2img(
963
+ self,
964
+ image: Union[np.ndarray, PIL.Image.Image],
965
+ prompt: Union[str, List[str]],
966
+ negative_prompt: Optional[Union[str, List[str]]] = None,
967
+ strength: float = 0.8,
968
+ num_inference_steps: Optional[int] = 50,
969
+ guidance_scale: Optional[float] = 7.5,
970
+ num_images_per_prompt: Optional[int] = 1,
971
+ eta: Optional[float] = 0.0,
972
+ generator: Optional[torch.Generator] = None,
973
+ max_embeddings_multiples: Optional[int] = 3,
974
+ output_type: Optional[str] = "pil",
975
+ return_dict: bool = True,
976
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
977
+ callback_steps: int = 1,
978
+ **kwargs,
979
+ ):
980
+ r"""
981
+ Function for image-to-image generation.
982
+ Args:
983
+ image (`np.ndarray` or `PIL.Image.Image`):
984
+ `Image`, or ndarray representing an image batch, that will be used as the starting point for the
985
+ process.
986
+ prompt (`str` or `List[str]`):
987
+ The prompt or prompts to guide the image generation.
988
+ negative_prompt (`str` or `List[str]`, *optional*):
989
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
990
+ if `guidance_scale` is less than `1`).
991
+ strength (`float`, *optional*, defaults to 0.8):
992
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
993
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
994
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
995
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
996
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
997
+ num_inference_steps (`int`, *optional*, defaults to 50):
998
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
999
+ expense of slower inference. This parameter will be modulated by `strength`.
1000
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1001
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1002
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1003
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1004
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1005
+ usually at the expense of lower image quality.
1006
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1007
+ The number of images to generate per prompt.
1008
+ eta (`float`, *optional*, defaults to 0.0):
1009
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1010
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1011
+ generator (`torch.Generator`, *optional*):
1012
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
1013
+ deterministic.
1014
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1015
+ The maximum allowed length of the prompt embeddings, expressed as a multiple of the text encoder's maximum output length.
1016
+ output_type (`str`, *optional*, defaults to `"pil"`):
1017
+ The output format of the generated image. Choose between
1018
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1019
+ return_dict (`bool`, *optional*, defaults to `True`):
1020
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1021
+ plain tuple.
1022
+ callback (`Callable`, *optional*):
1023
+ A function that will be called every `callback_steps` steps during inference. The function will be
1024
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
1025
+ callback_steps (`int`, *optional*, defaults to 1):
1026
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1027
+ called at every step.
1028
+ Returns:
1029
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1030
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1031
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1032
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1033
+ (nsfw) content, according to the `safety_checker`.
1034
+ """
1035
+ return self.__call__(
1036
+ prompt=prompt,
1037
+ negative_prompt=negative_prompt,
1038
+ image=image,
1039
+ num_inference_steps=num_inference_steps,
1040
+ guidance_scale=guidance_scale,
1041
+ strength=strength,
1042
+ num_images_per_prompt=num_images_per_prompt,
1043
+ eta=eta,
1044
+ generator=generator,
1045
+ max_embeddings_multiples=max_embeddings_multiples,
1046
+ output_type=output_type,
1047
+ return_dict=return_dict,
1048
+ callback=callback,
1049
+ callback_steps=callback_steps,
1050
+ **kwargs,
1051
+ )
1052
+
1053
+ def inpaint(
1054
+ self,
1055
+ image: Union[np.ndarray, PIL.Image.Image],
1056
+ mask_image: Union[np.ndarray, PIL.Image.Image],
1057
+ prompt: Union[str, List[str]],
1058
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1059
+ strength: float = 0.8,
1060
+ num_inference_steps: Optional[int] = 50,
1061
+ guidance_scale: Optional[float] = 7.5,
1062
+ num_images_per_prompt: Optional[int] = 1,
1063
+ eta: Optional[float] = 0.0,
1064
+ generator: Optional[torch.Generator] = None,
1065
+ max_embeddings_multiples: Optional[int] = 3,
1066
+ output_type: Optional[str] = "pil",
1067
+ return_dict: bool = True,
1068
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
1069
+ callback_steps: int = 1,
1070
+ **kwargs,
1071
+ ):
1072
+ r"""
1073
+ Function for inpainting.
1074
+ Args:
1075
+ image (`np.ndarray` or `PIL.Image.Image`):
1076
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
1077
+ process. This is the image whose masked region will be inpainted.
1078
+ mask_image (`np.ndarray` or `PIL.Image.Image`):
1079
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1080
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
1081
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
1082
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
1083
+ prompt (`str` or `List[str]`):
1084
+ The prompt or prompts to guide the image generation.
1085
+ negative_prompt (`str` or `List[str]`, *optional*):
1086
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1087
+ if `guidance_scale` is less than `1`).
1088
+ strength (`float`, *optional*, defaults to 0.8):
1089
+ Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
1090
+ is 1, the denoising process will be run on the masked area for the full number of iterations specified
1091
+ in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
1092
+ noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
1093
+ num_inference_steps (`int`, *optional*, defaults to 50):
1094
+ The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
1095
+ the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
1096
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1097
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1098
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1099
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1100
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1101
+ usually at the expense of lower image quality.
1102
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1103
+ The number of images to generate per prompt.
1104
+ eta (`float`, *optional*, defaults to 0.0):
1105
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1106
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1107
+ generator (`torch.Generator`, *optional*):
1108
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
1109
+ deterministic.
1110
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1111
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1112
+ output_type (`str`, *optional*, defaults to `"pil"`):
1113
+ The output format of the generate image. Choose between
1114
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1115
+ return_dict (`bool`, *optional*, defaults to `True`):
1116
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1117
+ plain tuple.
1118
+ callback (`Callable`, *optional*):
1119
+ A function that will be called every `callback_steps` steps during inference. The function will be
1120
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
1121
+ callback_steps (`int`, *optional*, defaults to 1):
1122
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1123
+ called at every step.
1124
+ Returns:
1125
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1126
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1127
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1128
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1129
+ (nsfw) content, according to the `safety_checker`.
1130
+ """
1131
+ return self.__call__(
1132
+ prompt=prompt,
1133
+ negative_prompt=negative_prompt,
1134
+ image=image,
1135
+ mask_image=mask_image,
1136
+ num_inference_steps=num_inference_steps,
1137
+ guidance_scale=guidance_scale,
1138
+ strength=strength,
1139
+ num_images_per_prompt=num_images_per_prompt,
1140
+ eta=eta,
1141
+ generator=generator,
1142
+ max_embeddings_multiples=max_embeddings_multiples,
1143
+ output_type=output_type,
1144
+ return_dict=return_dict,
1145
+ callback=callback,
1146
+ callback_steps=callback_steps,
1147
+ **kwargs,
1148
+ )
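The `img2img` and `inpaint` wrappers above are thin conveniences that forward to `__call__`. For reference, a minimal usage sketch of the `img2img` wrapper is shown below; the local ONNX model path, the input image file, and the execution provider are illustrative assumptions and not part of this file.

```py
# Hedged sketch: model path, image file and provider are assumptions for illustration.
import PIL.Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "./onnx-stable-diffusion",                  # hypothetical local ONNX export
    custom_pipeline="lpw_stable_diffusion_onnx",
    provider="CPUExecutionProvider",            # or "CUDAExecutionProvider"
)

init_image = PIL.Image.open("sketch.png").convert("RGB").resize((512, 512))  # hypothetical input

# The pipeline parses the attention weights itself and accepts prompts longer than 77 tokens.
result = pipe.img2img(
    image=init_image,
    prompt="a (detailed:1.3) watercolor painting of a harbor at sunset, " * 10,
    negative_prompt="(low quality:1.4), blurry",
    strength=0.7,
    num_inference_steps=30,
    max_embeddings_multiples=3,
)
result.images[0].save("harbor.png")
```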
v0.24.0/lpw_stable_diffusion_xl.py ADDED
@@ -0,0 +1,1312 @@
1
+ ## ----------------------------------------------------------
2
+ # An SDXL pipeline that can take an unlimited-length weighted prompt
3
+ #
4
+ # Author: Andrew Zhu
5
+ # Github: https://github.com/xhinker
6
+ # Medium: https://medium.com/@xhinker
7
+ ## -----------------------------------------------------------
8
+
9
+ import inspect
10
+ import os
11
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
12
+
13
+ import torch
14
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
15
+
16
+ from diffusers import DiffusionPipeline, StableDiffusionXLPipeline
17
+ from diffusers.image_processor import VaeImageProcessor
18
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
19
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
20
+ from diffusers.models.attention_processor import (
21
+ AttnProcessor2_0,
22
+ LoRAAttnProcessor2_0,
23
+ LoRAXFormersAttnProcessor,
24
+ XFormersAttnProcessor,
25
+ )
26
+ from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
27
+ from diffusers.schedulers import KarrasDiffusionSchedulers
28
+ from diffusers.utils import (
29
+ is_accelerate_available,
30
+ is_accelerate_version,
31
+ is_invisible_watermark_available,
32
+ logging,
33
+ replace_example_docstring,
34
+ )
35
+ from diffusers.utils.torch_utils import randn_tensor
36
+
37
+
38
+ if is_invisible_watermark_available():
39
+ from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
40
+
41
+
42
+ def parse_prompt_attention(text):
43
+ """
44
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
45
+ Accepted tokens are:
46
+ (abc) - increases attention to abc by a multiplier of 1.1
47
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
48
+ [abc] - decreases attention to abc by a multiplier of 1.1
49
+ \\( - literal character '('
50
+ \\[ - literal character '['
51
+ \\) - literal character ')'
52
+ \\] - literal character ']'
53
+ \\ - literal character '\'
54
+ anything else - just text
55
+
56
+ >>> parse_prompt_attention('normal text')
57
+ [['normal text', 1.0]]
58
+ >>> parse_prompt_attention('an (important) word')
59
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
60
+ >>> parse_prompt_attention('(unbalanced')
61
+ [['unbalanced', 1.1]]
62
+ >>> parse_prompt_attention('\\(literal\\]')
63
+ [['(literal]', 1.0]]
64
+ >>> parse_prompt_attention('(unnecessary)(parens)')
65
+ [['unnecessaryparens', 1.1]]
66
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
67
+ [['a ', 1.0],
68
+ ['house', 1.5730000000000004],
69
+ [' ', 1.1],
70
+ ['on', 1.0],
71
+ [' a ', 1.1],
72
+ ['hill', 0.55],
73
+ [', sun, ', 1.1],
74
+ ['sky', 1.4641000000000006],
75
+ ['.', 1.1]]
76
+ """
77
+ import re
78
+
79
+ re_attention = re.compile(
80
+ r"""
81
+ \\\(|\\\)|\\\[|\\]|\\\\|\\|\(|\[|:([+-]?[.\d]+)\)|
82
+ \)|]|[^\\()\[\]:]+|:
83
+ """,
84
+ re.X,
85
+ )
86
+
87
+ re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
88
+
89
+ res = []
90
+ round_brackets = []
91
+ square_brackets = []
92
+
93
+ round_bracket_multiplier = 1.1
94
+ square_bracket_multiplier = 1 / 1.1
95
+
96
+ def multiply_range(start_position, multiplier):
97
+ for p in range(start_position, len(res)):
98
+ res[p][1] *= multiplier
99
+
100
+ for m in re_attention.finditer(text):
101
+ text = m.group(0)
102
+ weight = m.group(1)
103
+
104
+ if text.startswith("\\"):
105
+ res.append([text[1:], 1.0])
106
+ elif text == "(":
107
+ round_brackets.append(len(res))
108
+ elif text == "[":
109
+ square_brackets.append(len(res))
110
+ elif weight is not None and len(round_brackets) > 0:
111
+ multiply_range(round_brackets.pop(), float(weight))
112
+ elif text == ")" and len(round_brackets) > 0:
113
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
114
+ elif text == "]" and len(square_brackets) > 0:
115
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
116
+ else:
117
+ parts = re.split(re_break, text)
118
+ for i, part in enumerate(parts):
119
+ if i > 0:
120
+ res.append(["BREAK", -1])
121
+ res.append([part, 1.0])
122
+
123
+ for pos in round_brackets:
124
+ multiply_range(pos, round_bracket_multiplier)
125
+
126
+ for pos in square_brackets:
127
+ multiply_range(pos, square_bracket_multiplier)
128
+
129
+ if len(res) == 0:
130
+ res = [["", 1.0]]
131
+
132
+ # merge runs of identical weights
133
+ i = 0
134
+ while i + 1 < len(res):
135
+ if res[i][1] == res[i + 1][1]:
136
+ res[i][0] += res[i + 1][0]
137
+ res.pop(i + 1)
138
+ else:
139
+ i += 1
140
+
141
+ return res
142
+
143
+
144
+ def get_prompts_tokens_with_weights(clip_tokenizer: CLIPTokenizer, prompt: str):
145
+ """
146
+ Get prompt token ids and weights; this function works for both the prompt and the negative prompt
147
+
148
+ Args:
149
+ clip_tokenizer (CLIPTokenizer)
150
+ A CLIPTokenizer
151
+ prompt (str)
152
+ A prompt string with weights
153
+
154
+ Returns:
155
+ text_tokens (list)
156
+ A list containing the token ids
157
+ text_weights (list)
158
+ A list containing the corresponding weight for each token id
159
+
160
+ Example:
161
+ import torch
162
+ from transformers import CLIPTokenizer
163
+
164
+ clip_tokenizer = CLIPTokenizer.from_pretrained(
165
+ "stablediffusionapi/deliberate-v2"
166
+ , subfolder = "tokenizer"
167
+ , dtype = torch.float16
168
+ )
169
+
170
+ token_id_list, token_weight_list = get_prompts_tokens_with_weights(
171
+ clip_tokenizer = clip_tokenizer
172
+ ,prompt = "a (red:1.5) cat"*70
173
+ )
174
+ """
175
+ texts_and_weights = parse_prompt_attention(prompt)
176
+ text_tokens, text_weights = [], []
177
+ for word, weight in texts_and_weights:
178
+ # tokenize and discard the starting and the ending token
179
+ token = clip_tokenizer(word, truncation=False).input_ids[1:-1] # so that we can tokenize a prompt of any length
180
+ # the returned token is a 1d list: [320, 1125, 539, 320]
181
+
182
+ # merge the new tokens to the all tokens holder: text_tokens
183
+ text_tokens = [*text_tokens, *token]
184
+
185
+ # each token chunk will come with one weight, like ['red cat', 2.0]
186
+ # need to expand weight for each token.
187
+ chunk_weights = [weight] * len(token)
188
+
189
+ # append the weight back to the weight holder: text_weights
190
+ text_weights = [*text_weights, *chunk_weights]
191
+ return text_tokens, text_weights
192
+
193
+
194
+ def group_tokens_and_weights(token_ids: list, weights: list, pad_last_block=False):
195
+ """
196
+ Produce tokens and weights in groups and pad the missing tokens
197
+
198
+ Args:
199
+ token_ids (list)
200
+ The token ids from tokenizer
201
+ weights (list)
202
+ The weights list from function get_prompts_tokens_with_weights
203
+ pad_last_block (bool)
204
+ Controls whether to pad the last token list up to 75 tokens with eos
205
+ Returns:
206
+ new_token_ids (2d list)
207
+ new_weights (2d list)
208
+
209
+ Example:
210
+ token_groups,weight_groups = group_tokens_and_weights(
211
+ token_ids = token_id_list
212
+ , weights = token_weight_list
213
+ )
214
+ """
215
+ bos, eos = 49406, 49407
216
+
217
+ # this will be a 2d list
218
+ new_token_ids = []
219
+ new_weights = []
220
+ while len(token_ids) >= 75:
221
+ # get the first 75 tokens
222
+ head_75_tokens = [token_ids.pop(0) for _ in range(75)]
223
+ head_75_weights = [weights.pop(0) for _ in range(75)]
224
+
225
+ # extract token ids and weights
226
+ temp_77_token_ids = [bos] + head_75_tokens + [eos]
227
+ temp_77_weights = [1.0] + head_75_weights + [1.0]
228
+
229
+ # add 77 token and weights chunk to the holder list
230
+ new_token_ids.append(temp_77_token_ids)
231
+ new_weights.append(temp_77_weights)
232
+
233
+ # pad the leftover tokens
234
+ if len(token_ids) > 0:
235
+ padding_len = 75 - len(token_ids) if pad_last_block else 0
236
+
237
+ temp_77_token_ids = [bos] + token_ids + [eos] * padding_len + [eos]
238
+ new_token_ids.append(temp_77_token_ids)
239
+
240
+ temp_77_weights = [1.0] + weights + [1.0] * padding_len + [1.0]
241
+ new_weights.append(temp_77_weights)
242
+
243
+ return new_token_ids, new_weights
244
+
245
+
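To make the chunking behaviour of the two helpers above concrete, here is a small, hedged sketch; the tokenizer checkpoint name is an assumption, and `get_prompts_tokens_with_weights` / `group_tokens_and_weights` are assumed to be importable from this module.

```py
# Hedged sketch: "openai/clip-vit-large-patch14" is an assumed tokenizer checkpoint.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")

# Parse the weighted prompt into token ids plus one weight per token.
token_ids, weights = get_prompts_tokens_with_weights(
    tokenizer, "a (red:1.5) cat playing with a (blue:0.8) ball " * 20
)

# Split into 77-token chunks: [bos] + up to 75 tokens + [eos], padded with eos.
token_groups, weight_groups = group_tokens_and_weights(
    token_ids.copy(), weights.copy(), pad_last_block=True
)

print(len(token_groups), [len(g) for g in token_groups])  # every chunk has length 77
```

With `pad_last_block=True` the trailing partial chunk is padded with `eos` to the same 77-token length, so every group can be passed to the CLIP text encoders without further handling.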
246
+ def get_weighted_text_embeddings_sdxl(
247
+ pipe: StableDiffusionXLPipeline,
248
+ prompt: str = "",
249
+ prompt_2: str = None,
250
+ neg_prompt: str = "",
251
+ neg_prompt_2: str = None,
252
+ num_images_per_prompt: int = 1,
253
+ device: Optional[torch.device] = None,
254
+ ):
255
+ """
256
+ This function can process a long weighted prompt with no length limitation
257
+ for Stable Diffusion XL
258
+
259
+ Args:
260
+ pipe (StableDiffusionPipeline)
261
+ prompt (str)
262
+ prompt_2 (str)
263
+ neg_prompt (str)
264
+ neg_prompt_2 (str)
265
+ num_images_per_prompt (int)
266
+ device (torch.device)
267
+ Returns:
268
+ prompt_embeds (torch.Tensor)
269
+ neg_prompt_embeds (torch.Tensor)
270
+ """
271
+ device = device or pipe._execution_device
272
+
273
+ if prompt_2:
274
+ prompt = f"{prompt} {prompt_2}"
275
+
276
+ if neg_prompt_2:
277
+ neg_prompt = f"{neg_prompt} {neg_prompt_2}"
278
+
279
+ eos = pipe.tokenizer.eos_token_id
280
+
281
+ # tokenizer 1
282
+ prompt_tokens, prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, prompt)
283
+
284
+ neg_prompt_tokens, neg_prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, neg_prompt)
285
+
286
+ # tokenizer 2
287
+ prompt_tokens_2, prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, prompt)
288
+
289
+ neg_prompt_tokens_2, neg_prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, neg_prompt)
290
+
291
+ # padding the shorter one for prompt set 1
292
+ prompt_token_len = len(prompt_tokens)
293
+ neg_prompt_token_len = len(neg_prompt_tokens)
294
+
295
+ if prompt_token_len > neg_prompt_token_len:
296
+ # padding the neg_prompt with eos token
297
+ neg_prompt_tokens = neg_prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
298
+ neg_prompt_weights = neg_prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)
299
+ else:
300
+ # padding the prompt
301
+ prompt_tokens = prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
302
+ prompt_weights = prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)
303
+
304
+ # padding the shorter one for token set 2
305
+ prompt_token_len_2 = len(prompt_tokens_2)
306
+ neg_prompt_token_len_2 = len(neg_prompt_tokens_2)
307
+
308
+ if prompt_token_len_2 > neg_prompt_token_len_2:
309
+ # padding the neg_prompt with eos token
310
+ neg_prompt_tokens_2 = neg_prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
311
+ neg_prompt_weights_2 = neg_prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
312
+ else:
313
+ # padding the prompt
314
+ prompt_tokens_2 = prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
315
+ prompt_weights_2 = prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
316
+
317
+ embeds = []
318
+ neg_embeds = []
319
+
320
+ prompt_token_groups, prompt_weight_groups = group_tokens_and_weights(prompt_tokens.copy(), prompt_weights.copy())
321
+
322
+ neg_prompt_token_groups, neg_prompt_weight_groups = group_tokens_and_weights(
323
+ neg_prompt_tokens.copy(), neg_prompt_weights.copy()
324
+ )
325
+
326
+ prompt_token_groups_2, prompt_weight_groups_2 = group_tokens_and_weights(
327
+ prompt_tokens_2.copy(), prompt_weights_2.copy()
328
+ )
329
+
330
+ neg_prompt_token_groups_2, neg_prompt_weight_groups_2 = group_tokens_and_weights(
331
+ neg_prompt_tokens_2.copy(), neg_prompt_weights_2.copy()
332
+ )
333
+
334
+ # process each 77-token chunk separately and concatenate the resulting embeddings
335
+ for i in range(len(prompt_token_groups)):
336
+ # get positive prompt embeddings with weights
337
+ token_tensor = torch.tensor([prompt_token_groups[i]], dtype=torch.long, device=device)
338
+ weight_tensor = torch.tensor(prompt_weight_groups[i], dtype=torch.float16, device=device)
339
+
340
+ token_tensor_2 = torch.tensor([prompt_token_groups_2[i]], dtype=torch.long, device=device)
341
+
342
+ # use first text encoder
343
+ prompt_embeds_1 = pipe.text_encoder(token_tensor.to(device), output_hidden_states=True)
344
+ prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-2]
345
+
346
+ # use second text encoder
347
+ prompt_embeds_2 = pipe.text_encoder_2(token_tensor_2.to(device), output_hidden_states=True)
348
+ prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-2]
349
+ pooled_prompt_embeds = prompt_embeds_2[0]
350
+
351
+ prompt_embeds_list = [prompt_embeds_1_hidden_states, prompt_embeds_2_hidden_states]
352
+ token_embedding = torch.concat(prompt_embeds_list, dim=-1).squeeze(0)
353
+
354
+ for j in range(len(weight_tensor)):
355
+ if weight_tensor[j] != 1.0:
356
+ token_embedding[j] = (
357
+ token_embedding[-1] + (token_embedding[j] - token_embedding[-1]) * weight_tensor[j]
358
+ )
359
+
360
+ token_embedding = token_embedding.unsqueeze(0)
361
+ embeds.append(token_embedding)
362
+
363
+ # get negative prompt embeddings with weights
364
+ neg_token_tensor = torch.tensor([neg_prompt_token_groups[i]], dtype=torch.long, device=device)
365
+ neg_token_tensor_2 = torch.tensor([neg_prompt_token_groups_2[i]], dtype=torch.long, device=device)
366
+ neg_weight_tensor = torch.tensor(neg_prompt_weight_groups[i], dtype=torch.float16, device=device)
367
+
368
+ # use first text encoder
369
+ neg_prompt_embeds_1 = pipe.text_encoder(neg_token_tensor.to(device), output_hidden_states=True)
370
+ neg_prompt_embeds_1_hidden_states = neg_prompt_embeds_1.hidden_states[-2]
371
+
372
+ # use second text encoder
373
+ neg_prompt_embeds_2 = pipe.text_encoder_2(neg_token_tensor_2.to(device), output_hidden_states=True)
374
+ neg_prompt_embeds_2_hidden_states = neg_prompt_embeds_2.hidden_states[-2]
375
+ negative_pooled_prompt_embeds = neg_prompt_embeds_2[0]
376
+
377
+ neg_prompt_embeds_list = [neg_prompt_embeds_1_hidden_states, neg_prompt_embeds_2_hidden_states]
378
+ neg_token_embedding = torch.concat(neg_prompt_embeds_list, dim=-1).squeeze(0)
379
+
380
+ for z in range(len(neg_weight_tensor)):
381
+ if neg_weight_tensor[z] != 1.0:
382
+ neg_token_embedding[z] = (
383
+ neg_token_embedding[-1] + (neg_token_embedding[z] - neg_token_embedding[-1]) * neg_weight_tensor[z]
384
+ )
385
+
386
+ neg_token_embedding = neg_token_embedding.unsqueeze(0)
387
+ neg_embeds.append(neg_token_embedding)
388
+
389
+ prompt_embeds = torch.cat(embeds, dim=1)
390
+ negative_prompt_embeds = torch.cat(neg_embeds, dim=1)
391
+
392
+ bs_embed, seq_len, _ = prompt_embeds.shape
393
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
394
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
395
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
396
+
397
+ seq_len = negative_prompt_embeds.shape[1]
398
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
399
+ negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
400
+
401
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view(
402
+ bs_embed * num_images_per_prompt, -1
403
+ )
404
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view(
405
+ bs_embed * num_images_per_prompt, -1
406
+ )
407
+
408
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
409
+
410
+
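A minimal sketch of feeding the four returned tensors into `StableDiffusionXLPipeline` follows; the checkpoint name (the same one used in the example docstring further below) and the CUDA device are assumptions.

```py
# Hedged sketch: checkpoint and device are assumptions for illustration.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = get_weighted_text_embeddings_sdxl(
    pipe,
    prompt="a (photorealistic:1.3) portrait of an astronaut, (sharp focus:1.2)",
    neg_prompt="(blurry:1.4), low quality",
)

# The precomputed embeddings replace the plain `prompt` / `negative_prompt` arguments.
image = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
    num_inference_steps=30,
).images[0]
image.save("astronaut.png")
```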
411
+ # -------------------------------------------------------------------------------------------------------------------------------
412
+ # reuse the backbone code from StableDiffusionXLPipeline
413
+ # -------------------------------------------------------------------------------------------------------------------------------
414
+
415
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
416
+
417
+ EXAMPLE_DOC_STRING = """
418
+ Examples:
419
+ ```py
420
+ from diffusers import DiffusionPipeline
421
+ import torch
422
+
423
+ pipe = DiffusionPipeline.from_pretrained(
424
+ "stabilityai/stable-diffusion-xl-base-1.0"
425
+ , torch_dtype = torch.float16
426
+ , use_safetensors = True
427
+ , variant = "fp16"
428
+ , custom_pipeline = "lpw_stable_diffusion_xl",
429
+ )
430
+
431
+ prompt = "a white cat running on the grass"*20
432
+ prompt2 = "play a football"*20
433
+ prompt = f"{prompt},{prompt2}"
434
+ neg_prompt = "blur, low quality"
435
+
436
+ pipe.to("cuda")
437
+ images = pipe(
438
+ prompt = prompt
439
+ , negative_prompt = neg_prompt
440
+ ).images[0]
441
+
442
+ pipe.to("cpu")
443
+ torch.cuda.empty_cache()
444
+ images
445
+ ```
446
+ """
447
+
448
+
449
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
450
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
451
+ """
452
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
453
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
454
+ """
455
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
456
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
457
+ # rescale the results from guidance (fixes overexposure)
458
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
459
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
460
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
461
+ return noise_cfg
462
+
463
+
464
+ class SDXLLongPromptWeightingPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
465
+ r"""
466
+ Pipeline for text-to-image generation using Stable Diffusion XL.
467
+
468
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
469
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
470
+
471
+ In addition the pipeline inherits the following loading methods:
472
+ - *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
473
+ - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
474
+
475
+ as well as the following saving methods:
476
+ - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
477
+
478
+ Args:
479
+ vae ([`AutoencoderKL`]):
480
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
481
+ text_encoder ([`CLIPTextModel`]):
482
+ Frozen text-encoder. Stable Diffusion XL uses the text portion of
483
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
484
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
485
+ text_encoder_2 ([`CLIPTextModelWithProjection`]):
486
+ Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
487
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
488
+ specifically the
489
+ [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
490
+ variant.
491
+ tokenizer (`CLIPTokenizer`):
492
+ Tokenizer of class
493
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
494
+ tokenizer_2 (`CLIPTokenizer`):
495
+ Second Tokenizer of class
496
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
497
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
498
+ scheduler ([`SchedulerMixin`]):
499
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
500
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
501
+ """
502
+
503
+ def __init__(
504
+ self,
505
+ vae: AutoencoderKL,
506
+ text_encoder: CLIPTextModel,
507
+ text_encoder_2: CLIPTextModelWithProjection,
508
+ tokenizer: CLIPTokenizer,
509
+ tokenizer_2: CLIPTokenizer,
510
+ unet: UNet2DConditionModel,
511
+ scheduler: KarrasDiffusionSchedulers,
512
+ force_zeros_for_empty_prompt: bool = True,
513
+ add_watermarker: Optional[bool] = None,
514
+ ):
515
+ super().__init__()
516
+
517
+ self.register_modules(
518
+ vae=vae,
519
+ text_encoder=text_encoder,
520
+ text_encoder_2=text_encoder_2,
521
+ tokenizer=tokenizer,
522
+ tokenizer_2=tokenizer_2,
523
+ unet=unet,
524
+ scheduler=scheduler,
525
+ )
526
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
527
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
528
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
529
+ self.default_sample_size = self.unet.config.sample_size
530
+
531
+ add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
532
+
533
+ if add_watermarker:
534
+ self.watermark = StableDiffusionXLWatermarker()
535
+ else:
536
+ self.watermark = None
537
+
538
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
539
+ def enable_vae_slicing(self):
540
+ r"""
541
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
542
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
543
+ """
544
+ self.vae.enable_slicing()
545
+
546
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
547
+ def disable_vae_slicing(self):
548
+ r"""
549
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
550
+ computing decoding in one step.
551
+ """
552
+ self.vae.disable_slicing()
553
+
554
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
555
+ def enable_vae_tiling(self):
556
+ r"""
557
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
558
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
559
+ processing larger images.
560
+ """
561
+ self.vae.enable_tiling()
562
+
563
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
564
+ def disable_vae_tiling(self):
565
+ r"""
566
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
567
+ computing decoding in one step.
568
+ """
569
+ self.vae.disable_tiling()
570
+
571
+ def enable_model_cpu_offload(self, gpu_id=0):
572
+ r"""
573
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
574
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
575
+ method is called, and the model remains on the GPU until the next model runs. Memory savings are lower than with
576
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
577
+ """
578
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
579
+ from accelerate import cpu_offload_with_hook
580
+ else:
581
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
582
+
583
+ device = torch.device(f"cuda:{gpu_id}")
584
+
585
+ if self.device.type != "cpu":
586
+ self.to("cpu", silence_dtype_warnings=True)
587
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
588
+
589
+ model_sequence = (
590
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
591
+ )
592
+ model_sequence.extend([self.unet, self.vae])
593
+
594
+ hook = None
595
+ for cpu_offloaded_model in model_sequence:
596
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
597
+
598
+ # We'll offload the last model manually.
599
+ self.final_offload_hook = hook
600
+
601
+ def encode_prompt(
602
+ self,
603
+ prompt: str,
604
+ prompt_2: Optional[str] = None,
605
+ device: Optional[torch.device] = None,
606
+ num_images_per_prompt: int = 1,
607
+ do_classifier_free_guidance: bool = True,
608
+ negative_prompt: Optional[str] = None,
609
+ negative_prompt_2: Optional[str] = None,
610
+ prompt_embeds: Optional[torch.FloatTensor] = None,
611
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
612
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
613
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
614
+ lora_scale: Optional[float] = None,
615
+ ):
616
+ r"""
617
+ Encodes the prompt into text encoder hidden states.
618
+
619
+ Args:
620
+ prompt (`str` or `List[str]`, *optional*):
621
+ prompt to be encoded
622
+ prompt_2 (`str` or `List[str]`, *optional*):
623
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
624
+ used in both text-encoders
625
+ device: (`torch.device`):
626
+ torch device
627
+ num_images_per_prompt (`int`):
628
+ number of images that should be generated per prompt
629
+ do_classifier_free_guidance (`bool`):
630
+ whether to use classifier free guidance or not
631
+ negative_prompt (`str` or `List[str]`, *optional*):
632
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
633
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
634
+ less than `1`).
635
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
636
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
637
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
638
+ prompt_embeds (`torch.FloatTensor`, *optional*):
639
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
640
+ provided, text embeddings will be generated from `prompt` input argument.
641
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
642
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
643
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
644
+ argument.
645
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
646
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
647
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
648
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
649
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
650
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
651
+ input argument.
652
+ lora_scale (`float`, *optional*):
653
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
654
+ """
655
+ device = device or self._execution_device
656
+
657
+ # set lora scale so that monkey patched LoRA
658
+ # function of text encoder can correctly access it
659
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
660
+ self._lora_scale = lora_scale
661
+
662
+ if prompt is not None and isinstance(prompt, str):
663
+ batch_size = 1
664
+ elif prompt is not None and isinstance(prompt, list):
665
+ batch_size = len(prompt)
666
+ else:
667
+ batch_size = prompt_embeds.shape[0]
668
+
669
+ # Define tokenizers and text encoders
670
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
671
+ text_encoders = (
672
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
673
+ )
674
+
675
+ if prompt_embeds is None:
676
+ prompt_2 = prompt_2 or prompt
677
+ # textual inversion: process multi-vector tokens if necessary
678
+ prompt_embeds_list = []
679
+ prompts = [prompt, prompt_2]
680
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
681
+ if isinstance(self, TextualInversionLoaderMixin):
682
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
683
+
684
+ text_inputs = tokenizer(
685
+ prompt,
686
+ padding="max_length",
687
+ max_length=tokenizer.model_max_length,
688
+ truncation=True,
689
+ return_tensors="pt",
690
+ )
691
+
692
+ text_input_ids = text_inputs.input_ids
693
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
694
+
695
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
696
+ text_input_ids, untruncated_ids
697
+ ):
698
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
699
+ logger.warning(
700
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
701
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
702
+ )
703
+
704
+ prompt_embeds = text_encoder(
705
+ text_input_ids.to(device),
706
+ output_hidden_states=True,
707
+ )
708
+
709
+ # We are always interested only in the pooled output of the final text encoder
710
+ pooled_prompt_embeds = prompt_embeds[0]
711
+ prompt_embeds = prompt_embeds.hidden_states[-2]
712
+
713
+ prompt_embeds_list.append(prompt_embeds)
714
+
715
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
716
+
717
+ # get unconditional embeddings for classifier free guidance
718
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
719
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
720
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
721
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
722
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
723
+ negative_prompt = negative_prompt or ""
724
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
725
+
726
+ uncond_tokens: List[str]
727
+ if prompt is not None and type(prompt) is not type(negative_prompt):
728
+ raise TypeError(
729
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
730
+ f" {type(prompt)}."
731
+ )
732
+ elif isinstance(negative_prompt, str):
733
+ uncond_tokens = [negative_prompt, negative_prompt_2]
734
+ elif batch_size != len(negative_prompt):
735
+ raise ValueError(
736
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
737
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
738
+ " the batch size of `prompt`."
739
+ )
740
+ else:
741
+ uncond_tokens = [negative_prompt, negative_prompt_2]
742
+
743
+ negative_prompt_embeds_list = []
744
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
745
+ if isinstance(self, TextualInversionLoaderMixin):
746
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
747
+
748
+ max_length = prompt_embeds.shape[1]
749
+ uncond_input = tokenizer(
750
+ negative_prompt,
751
+ padding="max_length",
752
+ max_length=max_length,
753
+ truncation=True,
754
+ return_tensors="pt",
755
+ )
756
+
757
+ negative_prompt_embeds = text_encoder(
758
+ uncond_input.input_ids.to(device),
759
+ output_hidden_states=True,
760
+ )
761
+ # We are always interested only in the pooled output of the final text encoder
762
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
763
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
764
+
765
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
766
+
767
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
768
+
769
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
770
+ bs_embed, seq_len, _ = prompt_embeds.shape
771
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
772
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
773
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
774
+
775
+ if do_classifier_free_guidance:
776
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
777
+ seq_len = negative_prompt_embeds.shape[1]
778
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
779
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
780
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
781
+
782
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
783
+ bs_embed * num_images_per_prompt, -1
784
+ )
785
+ if do_classifier_free_guidance:
786
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
787
+ bs_embed * num_images_per_prompt, -1
788
+ )
789
+
790
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
791
+
792
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
793
+ def prepare_extra_step_kwargs(self, generator, eta):
794
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
795
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
796
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
797
+ # and should be between [0, 1]
798
+
799
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
800
+ extra_step_kwargs = {}
801
+ if accepts_eta:
802
+ extra_step_kwargs["eta"] = eta
803
+
804
+ # check if the scheduler accepts generator
805
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
806
+ if accepts_generator:
807
+ extra_step_kwargs["generator"] = generator
808
+ return extra_step_kwargs
809
+
810
+ def check_inputs(
811
+ self,
812
+ prompt,
813
+ prompt_2,
814
+ height,
815
+ width,
816
+ callback_steps,
817
+ negative_prompt=None,
818
+ negative_prompt_2=None,
819
+ prompt_embeds=None,
820
+ negative_prompt_embeds=None,
821
+ pooled_prompt_embeds=None,
822
+ negative_pooled_prompt_embeds=None,
823
+ ):
824
+ if height % 8 != 0 or width % 8 != 0:
825
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
826
+
827
+ if (callback_steps is None) or (
828
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
829
+ ):
830
+ raise ValueError(
831
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
832
+ f" {type(callback_steps)}."
833
+ )
834
+
835
+ if prompt is not None and prompt_embeds is not None:
836
+ raise ValueError(
837
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
838
+ " only forward one of the two."
839
+ )
840
+ elif prompt_2 is not None and prompt_embeds is not None:
841
+ raise ValueError(
842
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
843
+ " only forward one of the two."
844
+ )
845
+ elif prompt is None and prompt_embeds is None:
846
+ raise ValueError(
847
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
848
+ )
849
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
850
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
851
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
852
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
853
+
854
+ if negative_prompt is not None and negative_prompt_embeds is not None:
855
+ raise ValueError(
856
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
857
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
858
+ )
859
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
860
+ raise ValueError(
861
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
862
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
863
+ )
864
+
865
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
866
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
867
+ raise ValueError(
868
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
869
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
870
+ f" {negative_prompt_embeds.shape}."
871
+ )
872
+
873
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
874
+ raise ValueError(
875
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
876
+ )
877
+
878
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
879
+ raise ValueError(
880
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
881
+ )
882
+
883
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
884
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
885
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
886
+ if isinstance(generator, list) and len(generator) != batch_size:
887
+ raise ValueError(
888
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
889
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
890
+ )
891
+
892
+ if latents is None:
893
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
894
+ else:
895
+ latents = latents.to(device)
896
+
897
+ # scale the initial noise by the standard deviation required by the scheduler
898
+ latents = latents * self.scheduler.init_noise_sigma
899
+ return latents
900
+
901
+ def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
902
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
903
+
904
+ passed_add_embed_dim = (
905
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
906
+ )
907
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
908
+
909
+ if expected_add_embed_dim != passed_add_embed_dim:
910
+ raise ValueError(
911
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
912
+ )
913
+
914
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
915
+ return add_time_ids
916
+
917
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
918
+ def upcast_vae(self):
919
+ dtype = self.vae.dtype
920
+ self.vae.to(dtype=torch.float32)
921
+ use_torch_2_0_or_xformers = isinstance(
922
+ self.vae.decoder.mid_block.attentions[0].processor,
923
+ (
924
+ AttnProcessor2_0,
925
+ XFormersAttnProcessor,
926
+ LoRAXFormersAttnProcessor,
927
+ LoRAAttnProcessor2_0,
928
+ ),
929
+ )
930
+ # if xformers or torch_2_0 is used attention block does not need
931
+ # to be in float32 which can save lots of memory
932
+ if use_torch_2_0_or_xformers:
933
+ self.vae.post_quant_conv.to(dtype)
934
+ self.vae.decoder.conv_in.to(dtype)
935
+ self.vae.decoder.mid_block.to(dtype)
936
+
937
+ @torch.no_grad()
938
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
939
+ def __call__(
940
+ self,
941
+ prompt: str = None,
942
+ prompt_2: Optional[str] = None,
943
+ height: Optional[int] = None,
944
+ width: Optional[int] = None,
945
+ num_inference_steps: int = 50,
946
+ denoising_end: Optional[float] = None,
947
+ guidance_scale: float = 5.0,
948
+ negative_prompt: Optional[str] = None,
949
+ negative_prompt_2: Optional[str] = None,
950
+ num_images_per_prompt: Optional[int] = 1,
951
+ eta: float = 0.0,
952
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
953
+ latents: Optional[torch.FloatTensor] = None,
954
+ prompt_embeds: Optional[torch.FloatTensor] = None,
955
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
956
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
957
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
958
+ output_type: Optional[str] = "pil",
959
+ return_dict: bool = True,
960
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
961
+ callback_steps: int = 1,
962
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
963
+ guidance_rescale: float = 0.0,
964
+ original_size: Optional[Tuple[int, int]] = None,
965
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
966
+ target_size: Optional[Tuple[int, int]] = None,
967
+ ):
968
+ r"""
969
+ Function invoked when calling the pipeline for generation.
970
+
971
+ Args:
972
+ prompt (`str`):
973
+ The prompt to guide the image generation. If not defined, one has to pass `prompt_embeds`
974
+ instead.
975
+ prompt_2 (`str`):
976
+ The prompt to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
977
+ used in both text-encoders
978
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
979
+ The height in pixels of the generated image.
980
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
981
+ The width in pixels of the generated image.
982
+ num_inference_steps (`int`, *optional*, defaults to 50):
983
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
984
+ expense of slower inference.
985
+ denoising_end (`float`, *optional*):
986
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
987
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
988
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
989
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
990
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
991
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
992
+ guidance_scale (`float`, *optional*, defaults to 5.0):
993
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
994
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
995
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
996
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
997
+ usually at the expense of lower image quality.
998
+ negative_prompt (`str`):
999
+ The prompt not to guide the image generation. If not defined, one has to pass
1000
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1001
+ less than `1`).
1002
+ negative_prompt_2 (`str`):
1003
+ The prompt not to guide the image generation to be sent to `tokenizer_2` and
1004
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
1005
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1006
+ The number of images to generate per prompt.
1007
+ eta (`float`, *optional*, defaults to 0.0):
1008
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1009
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1010
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1011
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1012
+ to make generation deterministic.
1013
+ latents (`torch.FloatTensor`, *optional*):
1014
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1015
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1016
+ tensor will be generated by sampling using the supplied random `generator`.
1017
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1018
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1019
+ provided, text embeddings will be generated from `prompt` input argument.
1020
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1021
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1022
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1023
+ argument.
1024
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1025
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1026
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1027
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1028
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1029
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1030
+ input argument.
1031
+ output_type (`str`, *optional*, defaults to `"pil"`):
1032
+ The output format of the generated image. Choose between
1033
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1034
+ return_dict (`bool`, *optional*, defaults to `True`):
1035
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
1036
+ of a plain tuple.
1037
+ callback (`Callable`, *optional*):
1038
+ A function that will be called every `callback_steps` steps during inference. The function will be
1039
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1040
+ callback_steps (`int`, *optional*, defaults to 1):
1041
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1042
+ called at every step.
1043
+ cross_attention_kwargs (`dict`, *optional*):
1044
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1045
+ `self.processor` in
1046
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1047
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
1048
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
1049
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
1050
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
1051
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
1052
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1053
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1054
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1055
+ explained in section 2.2 of
1056
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1057
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1058
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1059
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1060
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1061
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1062
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1063
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1064
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1065
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1066
+
1067
+ Examples:
1068
+
1069
+ Returns:
1070
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
1071
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
1072
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
1073
+ """
1074
+ # 0. Default height and width to unet
1075
+ height = height or self.default_sample_size * self.vae_scale_factor
1076
+ width = width or self.default_sample_size * self.vae_scale_factor
1077
+
1078
+ original_size = original_size or (height, width)
1079
+ target_size = target_size or (height, width)
1080
+
1081
+ # 1. Check inputs. Raise error if not correct
1082
+ self.check_inputs(
1083
+ prompt,
1084
+ prompt_2,
1085
+ height,
1086
+ width,
1087
+ callback_steps,
1088
+ negative_prompt,
1089
+ negative_prompt_2,
1090
+ prompt_embeds,
1091
+ negative_prompt_embeds,
1092
+ pooled_prompt_embeds,
1093
+ negative_pooled_prompt_embeds,
1094
+ )
1095
+
1096
+ # 2. Define call parameters
1097
+ if prompt is not None and isinstance(prompt, str):
1098
+ batch_size = 1
1099
+ elif prompt is not None and isinstance(prompt, list):
1100
+ batch_size = len(prompt)
1101
+ else:
1102
+ batch_size = prompt_embeds.shape[0]
1103
+
1104
+ device = self._execution_device
1105
+
1106
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
1107
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1108
+ # corresponds to doing no classifier free guidance.
1109
+ do_classifier_free_guidance = guidance_scale > 1.0
1110
+
1111
+ # 3. Encode input prompt
1112
+ (cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None)
1113
+
1114
+ negative_prompt = negative_prompt if negative_prompt is not None else ""
1115
+
1116
+ (
1117
+ prompt_embeds,
1118
+ negative_prompt_embeds,
1119
+ pooled_prompt_embeds,
1120
+ negative_pooled_prompt_embeds,
1121
+ ) = get_weighted_text_embeddings_sdxl(
1122
+ pipe=self, prompt=prompt, neg_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt
1123
+ )
1124
+
1125
+ # 4. Prepare timesteps
1126
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1127
+
1128
+ timesteps = self.scheduler.timesteps
1129
+
1130
+ # 5. Prepare latent variables
1131
+ num_channels_latents = self.unet.config.in_channels
1132
+ latents = self.prepare_latents(
1133
+ batch_size * num_images_per_prompt,
1134
+ num_channels_latents,
1135
+ height,
1136
+ width,
1137
+ prompt_embeds.dtype,
1138
+ device,
1139
+ generator,
1140
+ latents,
1141
+ )
1142
+
1143
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1144
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1145
+
1146
+ # 7. Prepare added time ids & embeddings
1147
+ add_text_embeds = pooled_prompt_embeds
1148
+ add_time_ids = self._get_add_time_ids(
1149
+ original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
1150
+ )
1151
+
1152
+ if do_classifier_free_guidance:
1153
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1154
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1155
+ add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
1156
+
1157
+ prompt_embeds = prompt_embeds.to(device)
1158
+ add_text_embeds = add_text_embeds.to(device)
1159
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1160
+
1161
+ # 8. Denoising loop
1162
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1163
+
1164
+ # 7.1 Apply denoising_end
1165
+ if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
1166
+ discrete_timestep_cutoff = int(
1167
+ round(
1168
+ self.scheduler.config.num_train_timesteps
1169
+ - (denoising_end * self.scheduler.config.num_train_timesteps)
1170
+ )
1171
+ )
1172
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1173
+ timesteps = timesteps[:num_inference_steps]
1174
+
1175
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1176
+ for i, t in enumerate(timesteps):
1177
+ # expand the latents if we are doing classifier free guidance
1178
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1179
+
1180
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1181
+
1182
+ # predict the noise residual
1183
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1184
+ noise_pred = self.unet(
1185
+ latent_model_input,
1186
+ t,
1187
+ encoder_hidden_states=prompt_embeds,
1188
+ cross_attention_kwargs=cross_attention_kwargs,
1189
+ added_cond_kwargs=added_cond_kwargs,
1190
+ return_dict=False,
1191
+ )[0]
1192
+
1193
+ # perform guidance
1194
+ if do_classifier_free_guidance:
1195
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1196
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1197
+
1198
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1199
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1200
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1201
+
1202
+ # compute the previous noisy sample x_t -> x_t-1
1203
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1204
+
1205
+ # call the callback, if provided
1206
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1207
+ progress_bar.update()
1208
+ if callback is not None and i % callback_steps == 0:
1209
+ step_idx = i // getattr(self.scheduler, "order", 1)
1210
+ callback(step_idx, t, latents)
1211
+
1212
+ if not output_type == "latent":
1213
+ # make sure the VAE is in float32 mode, as it overflows in float16
1214
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1215
+
1216
+ if needs_upcasting:
1217
+ self.upcast_vae()
1218
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1219
+
1220
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1221
+
1222
+ # cast back to fp16 if needed
1223
+ if needs_upcasting:
1224
+ self.vae.to(dtype=torch.float16)
1225
+ else:
1226
+ image = latents
1227
+ return StableDiffusionXLPipelineOutput(images=image)
1228
+
1229
+ # apply watermark if available
1230
+ if self.watermark is not None:
1231
+ image = self.watermark.apply_watermark(image)
1232
+
1233
+ image = self.image_processor.postprocess(image, output_type=output_type)
1234
+
1235
+ # Offload last model to CPU
1236
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1237
+ self.final_offload_hook.offload()
1238
+
1239
+ if not return_dict:
1240
+ return (image,)
1241
+
1242
+ return StableDiffusionXLPipelineOutput(images=image)
1243
+
1244
+ # Override to properly handle the loading and unloading of the additional text encoder.
1245
+ def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
1246
+ # We could have accessed the unet config from `lora_state_dict()` too. We pass
1247
+ # it here explicitly to be able to tell that it's coming from an SDXL
1248
+ # pipeline.
1249
+ state_dict, network_alphas = self.lora_state_dict(
1250
+ pretrained_model_name_or_path_or_dict,
1251
+ unet_config=self.unet.config,
1252
+ **kwargs,
1253
+ )
1254
+ self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
1255
+
1256
+ text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
1257
+ if len(text_encoder_state_dict) > 0:
1258
+ self.load_lora_into_text_encoder(
1259
+ text_encoder_state_dict,
1260
+ network_alphas=network_alphas,
1261
+ text_encoder=self.text_encoder,
1262
+ prefix="text_encoder",
1263
+ lora_scale=self.lora_scale,
1264
+ )
1265
+
1266
+ text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
1267
+ if len(text_encoder_2_state_dict) > 0:
1268
+ self.load_lora_into_text_encoder(
1269
+ text_encoder_2_state_dict,
1270
+ network_alphas=network_alphas,
1271
+ text_encoder=self.text_encoder_2,
1272
+ prefix="text_encoder_2",
1273
+ lora_scale=self.lora_scale,
1274
+ )
1275
+
1276
+ @classmethod
1277
+ def save_lora_weights(
1278
+ self,
1279
+ save_directory: Union[str, os.PathLike],
1280
+ unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1281
+ text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1282
+ text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1283
+ is_main_process: bool = True,
1284
+ weight_name: str = None,
1285
+ save_function: Callable = None,
1286
+ safe_serialization: bool = False,
1287
+ ):
1288
+ state_dict = {}
1289
+
1290
+ def pack_weights(layers, prefix):
1291
+ layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
1292
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
1293
+ return layers_state_dict
1294
+
1295
+ state_dict.update(pack_weights(unet_lora_layers, "unet"))
1296
+
1297
+ if text_encoder_lora_layers and text_encoder_2_lora_layers:
1298
+ state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
1299
+ state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
1300
+
1301
+ self.write_lora_layers(
1302
+ state_dict=state_dict,
1303
+ save_directory=save_directory,
1304
+ is_main_process=is_main_process,
1305
+ weight_name=weight_name,
1306
+ save_function=save_function,
1307
+ safe_serialization=safe_serialization,
1308
+ )
1309
+
1310
+ def _remove_text_encoder_monkey_patch(self):
1311
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
1312
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
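
A usage note for the long-prompt-weighting SDXL pipeline above: the sketch below is a minimal, illustrative example, assuming the file is loaded as the `lpw_stable_diffusion_xl` community pipeline and that an SDXL base checkpoint (here `stabilityai/stable-diffusion-xl-base-1.0`, chosen only for illustration) is available. The parenthesized weight syntax is resolved by `get_weighted_text_embeddings_sdxl` inside `__call__`.

```py
import torch
from diffusers import DiffusionPipeline

# Checkpoint id and dtype are illustrative assumptions, not requirements of the pipeline.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="lpw_stable_diffusion_xl",
    torch_dtype=torch.float16,
).to("cuda")

# "(white horse:1.3)" upweights that phrase via get_weighted_text_embeddings_sdxl.
image = pipe(
    prompt="an astronaut riding a (white horse:1.3) on mars, highly detailed",
    negative_prompt="blurry, low quality",
    num_inference_steps=30,
    guidance_scale=5.0,
).images[0]
image.save("lpw_sdxl_example.png")
```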
v0.24.0/magic_mix.py ADDED
@@ -0,0 +1,152 @@
1
+ from typing import Union
2
+
3
+ import torch
4
+ from PIL import Image
5
+ from torchvision import transforms as tfms
6
+ from tqdm.auto import tqdm
7
+ from transformers import CLIPTextModel, CLIPTokenizer
8
+
9
+ from diffusers import (
10
+ AutoencoderKL,
11
+ DDIMScheduler,
12
+ DiffusionPipeline,
13
+ LMSDiscreteScheduler,
14
+ PNDMScheduler,
15
+ UNet2DConditionModel,
16
+ )
17
+
18
+
19
+ class MagicMixPipeline(DiffusionPipeline):
20
+ def __init__(
21
+ self,
22
+ vae: AutoencoderKL,
23
+ text_encoder: CLIPTextModel,
24
+ tokenizer: CLIPTokenizer,
25
+ unet: UNet2DConditionModel,
26
+ scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler],
27
+ ):
28
+ super().__init__()
29
+
30
+ self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
31
+
32
+ # convert PIL image to latents
33
+ def encode(self, img):
34
+ with torch.no_grad():
35
+ latent = self.vae.encode(tfms.ToTensor()(img).unsqueeze(0).to(self.device) * 2 - 1)
36
+ latent = 0.18215 * latent.latent_dist.sample()
37
+ return latent
38
+
39
+ # convert latents to PIL image
40
+ def decode(self, latent):
41
+ latent = (1 / 0.18215) * latent
42
+ with torch.no_grad():
43
+ img = self.vae.decode(latent).sample
44
+ img = (img / 2 + 0.5).clamp(0, 1)
45
+ img = img.detach().cpu().permute(0, 2, 3, 1).numpy()
46
+ img = (img * 255).round().astype("uint8")
47
+ return Image.fromarray(img[0])
48
+
49
+ # convert prompt into text embeddings, also unconditional embeddings
50
+ def prep_text(self, prompt):
51
+ text_input = self.tokenizer(
52
+ prompt,
53
+ padding="max_length",
54
+ max_length=self.tokenizer.model_max_length,
55
+ truncation=True,
56
+ return_tensors="pt",
57
+ )
58
+
59
+ text_embedding = self.text_encoder(text_input.input_ids.to(self.device))[0]
60
+
61
+ uncond_input = self.tokenizer(
62
+ "",
63
+ padding="max_length",
64
+ max_length=self.tokenizer.model_max_length,
65
+ truncation=True,
66
+ return_tensors="pt",
67
+ )
68
+
69
+ uncond_embedding = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
70
+
71
+ return torch.cat([uncond_embedding, text_embedding])
72
+
73
+ def __call__(
74
+ self,
75
+ img: Image.Image,
76
+ prompt: str,
77
+ kmin: float = 0.3,
78
+ kmax: float = 0.6,
79
+ mix_factor: float = 0.5,
80
+ seed: int = 42,
81
+ steps: int = 50,
82
+ guidance_scale: float = 7.5,
83
+ ) -> Image.Image:
84
+ tmin = steps - int(kmin * steps)
85
+ tmax = steps - int(kmax * steps)
86
+
87
+ text_embeddings = self.prep_text(prompt)
88
+
89
+ self.scheduler.set_timesteps(steps)
90
+
91
+ width, height = img.size
92
+ encoded = self.encode(img)
93
+
94
+ torch.manual_seed(seed)
95
+ noise = torch.randn(
96
+ (1, self.unet.config.in_channels, height // 8, width // 8),
97
+ ).to(self.device)
98
+
99
+ latents = self.scheduler.add_noise(
100
+ encoded,
101
+ noise,
102
+ timesteps=self.scheduler.timesteps[tmax],
103
+ )
104
+
105
+ input = torch.cat([latents] * 2)
106
+
107
+ input = self.scheduler.scale_model_input(input, self.scheduler.timesteps[tmax])
108
+
109
+ with torch.no_grad():
110
+ pred = self.unet(
111
+ input,
112
+ self.scheduler.timesteps[tmax],
113
+ encoder_hidden_states=text_embeddings,
114
+ ).sample
115
+
116
+ pred_uncond, pred_text = pred.chunk(2)
117
+ pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
118
+
119
+ latents = self.scheduler.step(pred, self.scheduler.timesteps[tmax], latents).prev_sample
120
+
121
+ for i, t in enumerate(tqdm(self.scheduler.timesteps)):
122
+ if i > tmax:
123
+ if i < tmin: # layout generation phase
124
+ orig_latents = self.scheduler.add_noise(
125
+ encoded,
126
+ noise,
127
+ timesteps=t,
128
+ )
129
+
130
+ input = (
131
+ (mix_factor * latents) + (1 - mix_factor) * orig_latents
132
+ ) # interpolating between layout noise and conditionally generated noise to preserve layout semantics
133
+ input = torch.cat([input] * 2)
134
+
135
+ else: # content generation phase
136
+ input = torch.cat([latents] * 2)
137
+
138
+ input = self.scheduler.scale_model_input(input, t)
139
+
140
+ with torch.no_grad():
141
+ pred = self.unet(
142
+ input,
143
+ t,
144
+ encoder_hidden_states=text_embeddings,
145
+ ).sample
146
+
147
+ pred_uncond, pred_text = pred.chunk(2)
148
+ pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
149
+
150
+ latents = self.scheduler.step(pred, t, latents).prev_sample
151
+
152
+ return self.decode(latents)
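
A minimal sketch of how `MagicMixPipeline` might be driven, assuming it is loaded via `custom_pipeline="magic_mix"` on a Stable Diffusion v1 checkpoint; the model id and input image URL below are placeholders. `kmin`/`kmax` bound the layout-preservation phase and `mix_factor` controls how strongly the original layout noise is blended in.

```py
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# Checkpoint id and image URL are placeholders for illustration.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="magic_mix",
).to("cuda")

# Any RGB image whose width and height are divisible by 8 works as the layout source.
img = load_image("https://example.com/bed.jpg")

# Layout semantics are preserved between kmin and kmax; content follows the prompt afterwards.
mixed = pipe(img, prompt="a bed", kmin=0.3, kmax=0.6, mix_factor=0.5, steps=50, guidance_scale=7.5)
mixed.save("magic_mix_example.png")
```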
v0.24.0/masked_stable_diffusion_img2img.py ADDED
@@ -0,0 +1,262 @@
1
+ from typing import Any, Callable, Dict, List, Optional, Union
2
+
3
+ import numpy as np
4
+ import PIL.Image
5
+ import torch
6
+
7
+ from diffusers import StableDiffusionImg2ImgPipeline
8
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
9
+
10
+
11
+ class MaskedStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
12
+ debug_save = False
13
+
14
+ @torch.no_grad()
15
+ def __call__(
16
+ self,
17
+ prompt: Union[str, List[str]] = None,
18
+ image: Union[
19
+ torch.FloatTensor,
20
+ PIL.Image.Image,
21
+ np.ndarray,
22
+ List[torch.FloatTensor],
23
+ List[PIL.Image.Image],
24
+ List[np.ndarray],
25
+ ] = None,
26
+ strength: float = 0.8,
27
+ num_inference_steps: Optional[int] = 50,
28
+ guidance_scale: Optional[float] = 7.5,
29
+ negative_prompt: Optional[Union[str, List[str]]] = None,
30
+ num_images_per_prompt: Optional[int] = 1,
31
+ eta: Optional[float] = 0.0,
32
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
33
+ prompt_embeds: Optional[torch.FloatTensor] = None,
34
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
35
+ output_type: Optional[str] = "pil",
36
+ return_dict: bool = True,
37
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
38
+ callback_steps: int = 1,
39
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
40
+ mask: Union[
41
+ torch.FloatTensor,
42
+ PIL.Image.Image,
43
+ np.ndarray,
44
+ List[torch.FloatTensor],
45
+ List[PIL.Image.Image],
46
+ List[np.ndarray],
47
+ ] = None,
48
+ ):
49
+ r"""
50
+ The call function to the pipeline for generation.
51
+
52
+ Args:
53
+ prompt (`str` or `List[str]`, *optional*):
54
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
55
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
56
+ `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
57
+ latents as `image`, but if passing latents directly it is not encoded again.
58
+ strength (`float`, *optional*, defaults to 0.8):
59
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
60
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
61
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
62
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
63
+ essentially ignores `image`.
64
+ num_inference_steps (`int`, *optional*, defaults to 50):
65
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
66
+ expense of slower inference. This parameter is modulated by `strength`.
67
+ guidance_scale (`float`, *optional*, defaults to 7.5):
68
+ A higher guidance scale value encourages the model to generate images closely linked to the text
69
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
70
+ negative_prompt (`str` or `List[str]`, *optional*):
71
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
72
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
73
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
74
+ The number of images to generate per prompt.
75
+ eta (`float`, *optional*, defaults to 0.0):
76
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
77
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
78
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
79
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
80
+ generation deterministic.
81
+ prompt_embeds (`torch.FloatTensor`, *optional*):
82
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
83
+ provided, text embeddings are generated from the `prompt` input argument.
84
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
85
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
86
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
87
+ output_type (`str`, *optional*, defaults to `"pil"`):
88
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
89
+ return_dict (`bool`, *optional*, defaults to `True`):
90
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
91
+ plain tuple.
92
+ callback (`Callable`, *optional*):
93
+ A function that calls every `callback_steps` steps during inference. The function is called with the
94
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
95
+ callback_steps (`int`, *optional*, defaults to 1):
96
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
97
+ every step.
98
+ cross_attention_kwargs (`dict`, *optional*):
99
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
100
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
101
+ mask (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`, *optional*):
102
+ A mask with non-zero elements for the area to be inpainted. If not specified, no mask is applied.
103
+ Examples:
104
+
105
+ Returns:
106
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
107
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
108
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
109
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
110
+ "not-safe-for-work" (nsfw) content.
111
+ """
112
+ # code adapted from parent class StableDiffusionImg2ImgPipeline
113
+
114
+ # 0. Check inputs. Raise error if not correct
115
+ self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
116
+
117
+ # 1. Define call parameters
118
+ if prompt is not None and isinstance(prompt, str):
119
+ batch_size = 1
120
+ elif prompt is not None and isinstance(prompt, list):
121
+ batch_size = len(prompt)
122
+ else:
123
+ batch_size = prompt_embeds.shape[0]
124
+ device = self._execution_device
125
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
126
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
127
+ # corresponds to doing no classifier free guidance.
128
+ do_classifier_free_guidance = guidance_scale > 1.0
129
+
130
+ # 2. Encode input prompt
131
+ text_encoder_lora_scale = (
132
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
133
+ )
134
+ prompt_embeds = self._encode_prompt(
135
+ prompt,
136
+ device,
137
+ num_images_per_prompt,
138
+ do_classifier_free_guidance,
139
+ negative_prompt,
140
+ prompt_embeds=prompt_embeds,
141
+ negative_prompt_embeds=negative_prompt_embeds,
142
+ lora_scale=text_encoder_lora_scale,
143
+ )
144
+
145
+ # 3. Preprocess image
146
+ image = self.image_processor.preprocess(image)
147
+
148
+ # 4. set timesteps
149
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
150
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
151
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
152
+
153
+ # 5. Prepare latent variables
154
+ # it is sampled from the latent distribution of the VAE
155
+ latents = self.prepare_latents(
156
+ image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
157
+ )
158
+
159
+ # mean of the latent distribution
160
+ init_latents = [
161
+ self.vae.encode(image.to(device=device, dtype=prompt_embeds.dtype)[i : i + 1]).latent_dist.mean
162
+ for i in range(batch_size)
163
+ ]
164
+ init_latents = torch.cat(init_latents, dim=0)
165
+
166
+ # 6. create latent mask
167
+ latent_mask = self._make_latent_mask(latents, mask)
168
+
169
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
170
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
171
+
172
+ # 8. Denoising loop
173
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
174
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
175
+ for i, t in enumerate(timesteps):
176
+ # expand the latents if we are doing classifier free guidance
177
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
178
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
179
+
180
+ # predict the noise residual
181
+ noise_pred = self.unet(
182
+ latent_model_input,
183
+ t,
184
+ encoder_hidden_states=prompt_embeds,
185
+ cross_attention_kwargs=cross_attention_kwargs,
186
+ return_dict=False,
187
+ )[0]
188
+
189
+ # perform guidance
190
+ if do_classifier_free_guidance:
191
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
192
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
193
+
194
+ if latent_mask is not None:
195
+ latents = torch.lerp(init_latents * self.vae.config.scaling_factor, latents, latent_mask)
196
+ noise_pred = torch.lerp(torch.zeros_like(noise_pred), noise_pred, latent_mask)
197
+
198
+ # compute the previous noisy sample x_t -> x_t-1
199
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
200
+
201
+ # call the callback, if provided
202
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
203
+ progress_bar.update()
204
+ if callback is not None and i % callback_steps == 0:
205
+ step_idx = i // getattr(self.scheduler, "order", 1)
206
+ callback(step_idx, t, latents)
207
+
208
+ if not output_type == "latent":
209
+ scaled = latents / self.vae.config.scaling_factor
210
+ if latent_mask is not None:
211
+ # scaled = latents / self.vae.config.scaling_factor * latent_mask + init_latents * (1 - latent_mask)
212
+ scaled = torch.lerp(init_latents, scaled, latent_mask)
213
+ image = self.vae.decode(scaled, return_dict=False)[0]
214
+ if self.debug_save:
215
+ image_gen = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
216
+ image_gen = self.image_processor.postprocess(image_gen, output_type=output_type, do_denormalize=[True])
217
+ image_gen[0].save("from_latent.png")
218
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
219
+ else:
220
+ image = latents
221
+ has_nsfw_concept = None
222
+
223
+ if has_nsfw_concept is None:
224
+ do_denormalize = [True] * image.shape[0]
225
+ else:
226
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
227
+
228
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
229
+
230
+ # Offload last model to CPU
231
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
232
+ self.final_offload_hook.offload()
233
+
234
+ if not return_dict:
235
+ return (image, has_nsfw_concept)
236
+
237
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
238
+
239
+ def _make_latent_mask(self, latents, mask):
240
+ if mask is not None:
241
+ latent_mask = []
242
+ if not isinstance(mask, list):
243
+ tmp_mask = [mask]
244
+ else:
245
+ tmp_mask = mask
246
+ _, l_channels, l_height, l_width = latents.shape
247
+ for m in tmp_mask:
248
+ if not isinstance(m, PIL.Image.Image):
249
+ if len(m.shape) == 2:
250
+ m = m[..., np.newaxis]
251
+ if m.max() > 1:
252
+ m = m / 255.0
253
+ m = self.image_processor.numpy_to_pil(m)[0]
254
+ if m.mode != "L":
255
+ m = m.convert("L")
256
+ resized = self.image_processor.resize(m, l_height, l_width)
257
+ if self.debug_save:
258
+ resized.save("latent_mask.png")
259
+ latent_mask.append(np.repeat(np.array(resized)[np.newaxis, :, :], l_channels, axis=0))
260
+ latent_mask = torch.as_tensor(np.stack(latent_mask)).to(latents)
261
+ latent_mask = latent_mask / latent_mask.max()
262
+ return latent_mask
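
A hedged usage sketch for `MaskedStableDiffusionImg2ImgPipeline`, assuming it is loaded via `custom_pipeline="masked_stable_diffusion_img2img"`; the checkpoint id and image source below are placeholders. Non-zero mask values mark the region to regenerate, zero values keep the original latents.

```py
import numpy as np
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# Checkpoint id and image URL are placeholders for illustration.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="masked_stable_diffusion_img2img",
).to("cuda")

image = load_image("https://example.com/room.png").resize((512, 512))

# Non-zero entries are re-generated from the prompt; zero entries are preserved from the input image.
mask = np.zeros((512, 512), dtype=np.uint8)
mask[128:384, 128:384] = 255

result = pipe(prompt="a vase of flowers on a table", image=image, mask=mask, strength=0.75)
result.images[0].save("masked_img2img_example.png")
```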
v0.24.0/mixture_canvas.py ADDED
@@ -0,0 +1,501 @@
1
+ import re
2
+ from copy import deepcopy
3
+ from dataclasses import asdict, dataclass
4
+ from enum import Enum
5
+ from typing import List, Optional, Union
6
+
7
+ import numpy as np
8
+ import torch
9
+ from numpy import exp, pi, sqrt
10
+ from torchvision.transforms.functional import resize
11
+ from tqdm.auto import tqdm
12
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
13
+
14
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
15
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
16
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
17
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
18
+
19
+
20
+ def preprocess_image(image):
21
+ from PIL import Image
22
+
23
+ """Preprocess an input image
24
+
25
+ Same as
26
+ https://github.com/huggingface/diffusers/blob/1138d63b519e37f0ce04e027b9f4a3261d27c628/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L44
27
+ """
28
+ w, h = image.size
29
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
30
+ image = image.resize((w, h), resample=Image.LANCZOS)
31
+ image = np.array(image).astype(np.float32) / 255.0
32
+ image = image[None].transpose(0, 3, 1, 2)
33
+ image = torch.from_numpy(image)
34
+ return 2.0 * image - 1.0
35
+
36
+
37
+ @dataclass
38
+ class CanvasRegion:
39
+ """Class defining a rectangular region in the canvas"""
40
+
41
+ row_init: int # Region starting row in pixel space (included)
42
+ row_end: int # Region end row in pixel space (not included)
43
+ col_init: int # Region starting column in pixel space (included)
44
+ col_end: int # Region end column in pixel space (not included)
45
+ region_seed: int = None # Seed for random operations in this region
46
+ noise_eps: float = 0.0 # Deviation of a zero-mean gaussian noise to be applied over the latents in this region. Useful for slightly "rerolling" latents
47
+
48
+ def __post_init__(self):
49
+ # Initialize arguments if not specified
50
+ if self.region_seed is None:
51
+ self.region_seed = np.random.randint(9999999999)
52
+ # Check coordinates are non-negative
53
+ for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
54
+ if coord < 0:
55
+ raise ValueError(
56
+ f"A CanvasRegion must be defined with non-negative indices, found ({self.row_init}, {self.row_end}, {self.col_init}, {self.col_end})"
57
+ )
58
+ # Check coordinates are divisible by 8, else we end up with nasty rounding error when mapping to latent space
59
+ for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
60
+ if coord // 8 != coord / 8:
61
+ raise ValueError(
62
+ f"A CanvasRegion must be defined with locations divisible by 8, found ({self.row_init}-{self.row_end}, {self.col_init}-{self.col_end})"
63
+ )
64
+ # Check noise eps is non-negative
65
+ if self.noise_eps < 0:
66
+ raise ValueError(f"A CanvasRegion must be defined noises eps non-negative, found {self.noise_eps}")
67
+ # Compute coordinates for this region in latent space
68
+ self.latent_row_init = self.row_init // 8
69
+ self.latent_row_end = self.row_end // 8
70
+ self.latent_col_init = self.col_init // 8
71
+ self.latent_col_end = self.col_end // 8
72
+
73
+ @property
74
+ def width(self):
75
+ return self.col_end - self.col_init
76
+
77
+ @property
78
+ def height(self):
79
+ return self.row_end - self.row_init
80
+
81
+ def get_region_generator(self, device="cpu"):
82
+ """Creates a torch.Generator based on the random seed of this region"""
83
+ # Initialize region generator
84
+ return torch.Generator(device).manual_seed(self.region_seed)
85
+
86
+ @property
87
+ def __dict__(self):
88
+ return asdict(self)
89
+
90
+
91
+ class MaskModes(Enum):
92
+ """Modes in which the influence of diffuser is masked"""
93
+
94
+ CONSTANT = "constant"
95
+ GAUSSIAN = "gaussian"
96
+ QUARTIC = "quartic" # See https://en.wikipedia.org/wiki/Kernel_(statistics)
97
+
98
+
99
+ @dataclass
100
+ class DiffusionRegion(CanvasRegion):
101
+ """Abstract class defining a region where some class of diffusion process is acting"""
102
+
103
+ pass
104
+
105
+
106
+ @dataclass
107
+ class Text2ImageRegion(DiffusionRegion):
108
+ """Class defining a region where a text guided diffusion process is acting"""
109
+
110
+ prompt: str = "" # Text prompt guiding the diffuser in this region
111
+ guidance_scale: float = 7.5 # Guidance scale of the diffuser in this region. If None, randomize
112
+ mask_type: MaskModes = MaskModes.GAUSSIAN.value # Kind of weight mask applied to this region
113
+ mask_weight: float = 1.0 # Global weights multiplier of the mask
114
+ tokenized_prompt = None # Tokenized prompt
115
+ encoded_prompt = None # Encoded prompt
116
+
117
+ def __post_init__(self):
118
+ super().__post_init__()
119
+ # Mask weight cannot be negative
120
+ if self.mask_weight < 0:
121
+ raise ValueError(
122
+ f"A Text2ImageRegion must be defined with non-negative mask weight, found {self.mask_weight}"
123
+ )
124
+ # Mask type must be an actual known mask
125
+ if self.mask_type not in [e.value for e in MaskModes]:
126
+ raise ValueError(
127
+ f"A Text2ImageRegion was defined with mask {self.mask_type}, which is not an accepted mask ({[e.value for e in MaskModes]})"
128
+ )
129
+ # Randomize arguments if given as None
130
+ if self.guidance_scale is None:
131
+ self.guidance_scale = np.random.randint(5, 30)
132
+ # Clean prompt
133
+ self.prompt = re.sub(" +", " ", self.prompt).replace("\n", " ")
134
+
135
+ def tokenize_prompt(self, tokenizer):
136
+ """Tokenizes the prompt for this diffusion region using a given tokenizer"""
137
+ self.tokenized_prompt = tokenizer(
138
+ self.prompt,
139
+ padding="max_length",
140
+ max_length=tokenizer.model_max_length,
141
+ truncation=True,
142
+ return_tensors="pt",
143
+ )
144
+
145
+ def encode_prompt(self, text_encoder, device):
146
+ """Encodes the previously tokenized prompt for this diffusion region using a given encoder"""
147
+ assert self.tokenized_prompt is not None, ValueError(
148
+ "Prompt in diffusion region must be tokenized before encoding"
149
+ )
150
+ self.encoded_prompt = text_encoder(self.tokenized_prompt.input_ids.to(device))[0]
151
+
152
+
153
+ @dataclass
154
+ class Image2ImageRegion(DiffusionRegion):
155
+ """Class defining a region where an image guided diffusion process is acting"""
156
+
157
+ reference_image: torch.FloatTensor = None
158
+ strength: float = 0.8 # Strength of the image
159
+
160
+ def __post_init__(self):
161
+ super().__post_init__()
162
+ if self.reference_image is None:
163
+ raise ValueError("Must provide a reference image when creating an Image2ImageRegion")
164
+ if self.strength < 0 or self.strength > 1:
165
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {self.strength}")
166
+ # Rescale image to region shape
167
+ self.reference_image = resize(self.reference_image, size=[self.height, self.width])
168
+
169
+ def encode_reference_image(self, encoder, device, generator, cpu_vae=False):
170
+ """Encodes the reference image for this Image2Image region into the latent space"""
171
+ # Place encoder in CPU or not following the parameter cpu_vae
172
+ if cpu_vae:
173
+ # Note here we use mean instead of sample, to avoid moving also generator to CPU, which is troublesome
174
+ self.reference_latents = encoder.cpu().encode(self.reference_image).latent_dist.mean.to(device)
175
+ else:
176
+ self.reference_latents = encoder.encode(self.reference_image.to(device)).latent_dist.sample(
177
+ generator=generator
178
+ )
179
+ self.reference_latents = 0.18215 * self.reference_latents
180
+
181
+ @property
182
+ def __dict__(self):
183
+ # This class requires special casting to dict because of the reference_image tensor. Otherwise it cannot be cast to JSON
184
+
185
+ # Get all basic fields from parent class
186
+ super_fields = {key: getattr(self, key) for key in DiffusionRegion.__dataclass_fields__.keys()}
187
+ # Pack other fields
188
+ return {**super_fields, "reference_image": self.reference_image.cpu().tolist(), "strength": self.strength}
189
+
190
+
191
+ class RerollModes(Enum):
192
+ """Modes in which the reroll regions operate"""
193
+
194
+ RESET = "reset" # Completely reset the random noise in the region
195
+ EPSILON = "epsilon" # Alter slightly the latents in the region
196
+
197
+
198
+ @dataclass
199
+ class RerollRegion(CanvasRegion):
200
+ """Class defining a rectangular canvas region in which initial latent noise will be rerolled"""
201
+
202
+ reroll_mode: RerollModes = RerollModes.RESET.value
203
+
204
+
205
+ @dataclass
206
+ class MaskWeightsBuilder:
207
+ """Auxiliary class to compute a tensor of weights for a given diffusion region"""
208
+
209
+ latent_space_dim: int # Size of the U-net latent space
210
+ nbatch: int = 1 # Batch size in the U-net
211
+
212
+ def compute_mask_weights(self, region: DiffusionRegion) -> torch.tensor:
213
+ """Computes a tensor of weights for a given diffusion region"""
214
+ MASK_BUILDERS = {
215
+ MaskModes.CONSTANT.value: self._constant_weights,
216
+ MaskModes.GAUSSIAN.value: self._gaussian_weights,
217
+ MaskModes.QUARTIC.value: self._quartic_weights,
218
+ }
219
+ return MASK_BUILDERS[region.mask_type](region)
220
+
221
+ def _constant_weights(self, region: DiffusionRegion) -> torch.tensor:
222
+ """Computes a tensor of constant for a given diffusion region"""
223
+ latent_width = region.latent_col_end - region.latent_col_init
224
+ latent_height = region.latent_row_end - region.latent_row_init
225
+ return torch.ones(self.nbatch, self.latent_space_dim, latent_height, latent_width) * region.mask_weight
226
+
227
+ def _gaussian_weights(self, region: DiffusionRegion) -> torch.tensor:
228
+ """Generates a gaussian mask of weights for tile contributions"""
229
+ latent_width = region.latent_col_end - region.latent_col_init
230
+ latent_height = region.latent_row_end - region.latent_row_init
231
+
232
+ var = 0.01
233
+ midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1
234
+ x_probs = [
235
+ exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
236
+ for x in range(latent_width)
237
+ ]
238
+ midpoint = (latent_height - 1) / 2
239
+ y_probs = [
240
+ exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
241
+ for y in range(latent_height)
242
+ ]
243
+
244
+ weights = np.outer(y_probs, x_probs) * region.mask_weight
245
+ return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))
246
+
247
+ def _quartic_weights(self, region: DiffusionRegion) -> torch.tensor:
248
+ """Generates a quartic mask of weights for tile contributions
249
+
250
+ The quartic kernel has bounded support over the diffusion region, and a smooth decay to the region limits.
251
+ """
252
+ quartic_constant = 15.0 / 16.0
253
+
254
+ support = (np.array(range(region.latent_col_init, region.latent_col_end)) - region.latent_col_init) / (
255
+ region.latent_col_end - region.latent_col_init - 1
256
+ ) * 1.99 - (1.99 / 2.0)
257
+ x_probs = quartic_constant * np.square(1 - np.square(support))
258
+ support = (np.array(range(region.latent_row_init, region.latent_row_end)) - region.latent_row_init) / (
259
+ region.latent_row_end - region.latent_row_init - 1
260
+ ) * 1.99 - (1.99 / 2.0)
261
+ y_probs = quartic_constant * np.square(1 - np.square(support))
262
+
263
+ weights = np.outer(y_probs, x_probs) * region.mask_weight
264
+ return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))
265
+
266
+
267
+ class StableDiffusionCanvasPipeline(DiffusionPipeline):
268
+ """Stable Diffusion pipeline that mixes several diffusers in the same canvas"""
269
+
270
+ def __init__(
271
+ self,
272
+ vae: AutoencoderKL,
273
+ text_encoder: CLIPTextModel,
274
+ tokenizer: CLIPTokenizer,
275
+ unet: UNet2DConditionModel,
276
+ scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
277
+ safety_checker: StableDiffusionSafetyChecker,
278
+ feature_extractor: CLIPFeatureExtractor,
279
+ ):
280
+ super().__init__()
281
+ self.register_modules(
282
+ vae=vae,
283
+ text_encoder=text_encoder,
284
+ tokenizer=tokenizer,
285
+ unet=unet,
286
+ scheduler=scheduler,
287
+ safety_checker=safety_checker,
288
+ feature_extractor=feature_extractor,
289
+ )
290
+
291
+ def decode_latents(self, latents, cpu_vae=False):
292
+ """Decodes a given array of latents into pixel space"""
293
+ # scale and decode the image latents with vae
294
+ if cpu_vae:
295
+ lat = deepcopy(latents).cpu()
296
+ vae = deepcopy(self.vae).cpu()
297
+ else:
298
+ lat = latents
299
+ vae = self.vae
300
+
301
+ lat = 1 / 0.18215 * lat
302
+ image = vae.decode(lat).sample
303
+
304
+ image = (image / 2 + 0.5).clamp(0, 1)
305
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
306
+
307
+ return self.numpy_to_pil(image)
308
+
309
+ def get_latest_timestep_img2img(self, num_inference_steps, strength):
310
+ """Finds the latest timesteps where an img2img strength does not impose latents anymore"""
311
+ # get the original timestep using init_timestep
312
+ offset = self.scheduler.config.get("steps_offset", 0)
313
+ init_timestep = int(num_inference_steps * (1 - strength)) + offset
314
+ init_timestep = min(init_timestep, num_inference_steps)
315
+
316
+ t_start = min(max(num_inference_steps - init_timestep + offset, 0), num_inference_steps - 1)
317
+ latest_timestep = self.scheduler.timesteps[t_start]
318
+
319
+ return latest_timestep
320
+
321
+ @torch.no_grad()
322
+ def __call__(
323
+ self,
324
+ canvas_height: int,
325
+ canvas_width: int,
326
+ regions: List[DiffusionRegion],
327
+ num_inference_steps: Optional[int] = 50,
328
+ seed: Optional[int] = 12345,
329
+ reroll_regions: Optional[List[RerollRegion]] = None,
330
+ cpu_vae: Optional[bool] = False,
331
+ decode_steps: Optional[bool] = False,
332
+ ):
333
+ if reroll_regions is None:
334
+ reroll_regions = []
335
+ batch_size = 1
336
+
337
+ if decode_steps:
338
+ steps_images = []
339
+
340
+ # Prepare scheduler
341
+ self.scheduler.set_timesteps(num_inference_steps, device=self.device)
342
+
343
+ # Split diffusion regions by their kind
344
+ text2image_regions = [region for region in regions if isinstance(region, Text2ImageRegion)]
345
+ image2image_regions = [region for region in regions if isinstance(region, Image2ImageRegion)]
346
+
347
+ # Prepare text embeddings
348
+ for region in text2image_regions:
349
+ region.tokenize_prompt(self.tokenizer)
350
+ region.encode_prompt(self.text_encoder, self.device)
351
+
352
+ # Create original noisy latents using the timesteps
353
+ latents_shape = (batch_size, self.unet.config.in_channels, canvas_height // 8, canvas_width // 8)
354
+ generator = torch.Generator(self.device).manual_seed(seed)
355
+ init_noise = torch.randn(latents_shape, generator=generator, device=self.device)
356
+
357
+ # Reset latents in seed reroll regions, if requested
358
+ for region in reroll_regions:
359
+ if region.reroll_mode == RerollModes.RESET.value:
360
+ region_shape = (
361
+ latents_shape[0],
362
+ latents_shape[1],
363
+ region.latent_row_end - region.latent_row_init,
364
+ region.latent_col_end - region.latent_col_init,
365
+ )
366
+ init_noise[
367
+ :,
368
+ :,
369
+ region.latent_row_init : region.latent_row_end,
370
+ region.latent_col_init : region.latent_col_end,
371
+ ] = torch.randn(region_shape, generator=region.get_region_generator(self.device), device=self.device)
372
+
373
+ # Apply epsilon noise to regions: first diffusion regions, then reroll regions
374
+ all_eps_rerolls = regions + [r for r in reroll_regions if r.reroll_mode == RerollModes.EPSILON.value]
375
+ for region in all_eps_rerolls:
376
+ if region.noise_eps > 0:
377
+ region_noise = init_noise[
378
+ :,
379
+ :,
380
+ region.latent_row_init : region.latent_row_end,
381
+ region.latent_col_init : region.latent_col_end,
382
+ ]
383
+ eps_noise = (
384
+ torch.randn(
385
+ region_noise.shape, generator=region.get_region_generator(self.device), device=self.device
386
+ )
387
+ * region.noise_eps
388
+ )
389
+ init_noise[
390
+ :,
391
+ :,
392
+ region.latent_row_init : region.latent_row_end,
393
+ region.latent_col_init : region.latent_col_end,
394
+ ] += eps_noise
395
+
396
+ # scale the initial noise by the standard deviation required by the scheduler
397
+ latents = init_noise * self.scheduler.init_noise_sigma
398
+
399
+ # Get unconditional embeddings for classifier free guidance in text2image regions
400
+ for region in text2image_regions:
401
+ max_length = region.tokenized_prompt.input_ids.shape[-1]
402
+ uncond_input = self.tokenizer(
403
+ [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
404
+ )
405
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
406
+
407
+ # For classifier free guidance, we need to do two forward passes.
408
+ # Here we concatenate the unconditional and text embeddings into a single batch
409
+ # to avoid doing two forward passes
410
+ region.encoded_prompt = torch.cat([uncond_embeddings, region.encoded_prompt])
411
+
412
+ # Prepare image latents
413
+ for region in image2image_regions:
414
+ region.encode_reference_image(self.vae, device=self.device, generator=generator)
415
+
416
+ # Prepare mask of weights for each region
417
+ mask_builder = MaskWeightsBuilder(latent_space_dim=self.unet.config.in_channels, nbatch=batch_size)
418
+ mask_weights = [mask_builder.compute_mask_weights(region).to(self.device) for region in text2image_regions]
419
+
420
+ # Diffusion timesteps
421
+ for i, t in tqdm(enumerate(self.scheduler.timesteps)):
422
+ # Diffuse each region
423
+ noise_preds_regions = []
424
+
425
+ # text2image regions
426
+ for region in text2image_regions:
427
+ region_latents = latents[
428
+ :,
429
+ :,
430
+ region.latent_row_init : region.latent_row_end,
431
+ region.latent_col_init : region.latent_col_end,
432
+ ]
433
+ # expand the latents if we are doing classifier free guidance
434
+ latent_model_input = torch.cat([region_latents] * 2)
435
+ # scale model input following scheduler rules
436
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
437
+ # predict the noise residual
438
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=region.encoded_prompt)["sample"]
439
+ # perform guidance
440
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
441
+ noise_pred_region = noise_pred_uncond + region.guidance_scale * (noise_pred_text - noise_pred_uncond)
442
+ noise_preds_regions.append(noise_pred_region)
443
+
444
+ # Merge noise predictions for all tiles
445
+ noise_pred = torch.zeros(latents.shape, device=self.device)
446
+ contributors = torch.zeros(latents.shape, device=self.device)
447
+ # Add each tile contribution to overall latents
448
+ for region, noise_pred_region, mask_weights_region in zip(
449
+ text2image_regions, noise_preds_regions, mask_weights
450
+ ):
451
+ noise_pred[
452
+ :,
453
+ :,
454
+ region.latent_row_init : region.latent_row_end,
455
+ region.latent_col_init : region.latent_col_end,
456
+ ] += noise_pred_region * mask_weights_region
457
+ contributors[
458
+ :,
459
+ :,
460
+ region.latent_row_init : region.latent_row_end,
461
+ region.latent_col_init : region.latent_col_end,
462
+ ] += mask_weights_region
463
+ # Average overlapping areas with more than 1 contributor
464
+ noise_pred /= contributors
465
+ noise_pred = torch.nan_to_num(
466
+ noise_pred
467
+ ) # Replace NaNs by zeros: NaN can appear if a position is not covered by any DiffusionRegion
468
+
469
+ # compute the previous noisy sample x_t -> x_t-1
470
+ latents = self.scheduler.step(noise_pred, t, latents).prev_sample
471
+
472
+ # Image2Image regions: override latents generated by the scheduler
473
+ for region in image2image_regions:
474
+ influence_step = self.get_latest_timestep_img2img(num_inference_steps, region.strength)
475
+ # Only override in the timesteps before the last influence step of the image (given by its strength)
476
+ if t > influence_step:
477
+ timestep = t.repeat(batch_size)
478
+ region_init_noise = init_noise[
479
+ :,
480
+ :,
481
+ region.latent_row_init : region.latent_row_end,
482
+ region.latent_col_init : region.latent_col_end,
483
+ ]
484
+ region_latents = self.scheduler.add_noise(region.reference_latents, region_init_noise, timestep)
485
+ latents[
486
+ :,
487
+ :,
488
+ region.latent_row_init : region.latent_row_end,
489
+ region.latent_col_init : region.latent_col_end,
490
+ ] = region_latents
491
+
492
+ if decode_steps:
493
+ steps_images.append(self.decode_latents(latents, cpu_vae))
494
+
495
+ # scale and decode the image latents with vae
496
+ image = self.decode_latents(latents, cpu_vae)
497
+
498
+ output = {"images": image}
499
+ if decode_steps:
500
+ output = {**output, "steps_images": steps_images}
501
+ return output
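
A minimal, illustrative sketch for `StableDiffusionCanvasPipeline`, assuming the checkpoint id below and assuming `Text2ImageRegion` can be imported from this module (for example when the file is vendored locally as `mixture_canvas.py`); all region coordinates must be multiples of 8, and overlapping regions are blended by the gaussian weight masks computed above.

```py
from diffusers import DiffusionPipeline, LMSDiscreteScheduler
from mixture_canvas import Text2ImageRegion  # assumes this file is importable locally

# Checkpoint, prompts, and coordinates are illustrative.
scheduler = LMSDiscreteScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
)
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_canvas"
).to("cuda")

# Two overlapping text-guided regions on a 640x1024 canvas (all coordinates divisible by 8).
output = pipe(
    canvas_height=640,
    canvas_width=1024,
    regions=[
        Text2ImageRegion(0, 640, 0, 640, guidance_scale=8, prompt="a snowy mountain landscape, highly detailed"),
        Text2ImageRegion(0, 640, 384, 1024, guidance_scale=8, prompt="a wooden cabin at sunset, highly detailed"),
    ],
    num_inference_steps=50,
    seed=1234,
)
output["images"][0].save("mixture_canvas_example.png")
```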
v0.24.0/mixture_tiling.py ADDED
@@ -0,0 +1,405 @@
1
+ import inspect
2
+ from copy import deepcopy
3
+ from enum import Enum
4
+ from typing import List, Optional, Tuple, Union
5
+
6
+ import torch
7
+ from tqdm.auto import tqdm
8
+
9
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
10
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
11
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
12
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
13
+ from diffusers.utils import logging
14
+
15
+
16
+ try:
17
+ from ligo.segments import segment
18
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
19
+ except ImportError:
20
+ raise ImportError("Please install transformers and ligo-segments to use the mixture pipeline")
21
+
22
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
23
+
24
+ EXAMPLE_DOC_STRING = """
25
+ Examples:
26
+ ```py
27
+ >>> from diffusers import LMSDiscreteScheduler, DiffusionPipeline
28
+
29
+ >>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
30
+ >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_tiling")
31
+ >>> pipeline.to("cuda")
32
+
33
+ >>> image = pipeline(
34
+ >>> prompt=[[
35
+ >>> "A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
36
+ >>> "A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
37
+ >>> "An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece"
38
+ >>> ]],
39
+ >>> tile_height=640,
40
+ >>> tile_width=640,
41
+ >>> tile_row_overlap=0,
42
+ >>> tile_col_overlap=256,
43
+ >>> guidance_scale=8,
44
+ >>> seed=7178915308,
45
+ >>> num_inference_steps=50,
46
+ >>> )["images"][0]
47
+ ```
48
+ """
49
+
50
+
51
+ def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
52
+ """Given a tile row and column numbers returns the range of pixels affected by that tiles in the overall image
53
+
54
+ Returns a tuple with:
55
+ - Starting coordinates of rows in pixel space
56
+ - Ending coordinates of rows in pixel space
57
+ - Starting coordinates of columns in pixel space
58
+ - Ending coordinates of columns in pixel space
59
+ """
60
+ px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
61
+ px_row_end = px_row_init + tile_height
62
+ px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
63
+ px_col_end = px_col_init + tile_width
64
+ return px_row_init, px_row_end, px_col_init, px_col_end
65
+
66
+
67
+ def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end):
68
+ """Translates coordinates in pixel space to coordinates in latent space"""
69
+ return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8
70
+
71
+
72
+ def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
73
+ """Given a tile row and column numbers returns the range of latents affected by that tiles in the overall image
74
+
75
+ Returns a tuple with:
76
+ - Starting coordinates of rows in latent space
77
+ - Ending coordinates of rows in latent space
78
+ - Starting coordinates of columns in latent space
79
+ - Ending coordinates of columns in latent space
80
+ """
81
+ px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
82
+ tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
83
+ )
84
+ return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end)
85
+
86
+
87
+ def _tile2latent_exclusive_indices(
88
+ tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns
89
+ ):
90
+ """Given a tile row and column numbers returns the range of latents affected only by that tile in the overall image
91
+
92
+ Returns a tuple with:
93
+ - Starting coordinates of rows in latent space
94
+ - Ending coordinates of rows in latent space
95
+ - Starting coordinates of columns in latent space
96
+ - Ending coordinates of columns in latent space
97
+ """
98
+ row_init, row_end, col_init, col_end = _tile2latent_indices(
99
+ tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
100
+ )
101
+ row_segment = segment(row_init, row_end)
102
+ col_segment = segment(col_init, col_end)
103
+ # Iterate over the rest of tiles, clipping the region for the current tile
104
+ for row in range(rows):
105
+ for column in range(columns):
106
+ if row != tile_row and column != tile_col:
107
+ clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices(
108
+ row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap
109
+ )
110
+ row_segment = row_segment - segment(clip_row_init, clip_row_end)
111
+ col_segment = col_segment - segment(clip_col_init, clip_col_end)
112
+ # return row_init, row_end, col_init, col_end
113
+ return row_segment[0], row_segment[1], col_segment[0], col_segment[1]
114
+
115
+
116
+ class StableDiffusionExtrasMixin:
117
+ """Mixin providing additional convenience method to Stable Diffusion pipelines"""
118
+
119
+ def decode_latents(self, latents, cpu_vae=False):
120
+ """Decodes a given array of latents into pixel space"""
121
+ # scale and decode the image latents with vae
122
+ if cpu_vae:
123
+ lat = deepcopy(latents).cpu()
124
+ vae = deepcopy(self.vae).cpu()
125
+ else:
126
+ lat = latents
127
+ vae = self.vae
128
+
129
+ lat = 1 / 0.18215 * lat
130
+ image = vae.decode(lat).sample
131
+
132
+ image = (image / 2 + 0.5).clamp(0, 1)
133
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
134
+
135
+ return self.numpy_to_pil(image)
136
+
137
+
138
+ class StableDiffusionTilingPipeline(DiffusionPipeline, StableDiffusionExtrasMixin):
139
+ def __init__(
140
+ self,
141
+ vae: AutoencoderKL,
142
+ text_encoder: CLIPTextModel,
143
+ tokenizer: CLIPTokenizer,
144
+ unet: UNet2DConditionModel,
145
+ scheduler: Union[DDIMScheduler, PNDMScheduler],
146
+ safety_checker: StableDiffusionSafetyChecker,
147
+ feature_extractor: CLIPFeatureExtractor,
148
+ ):
149
+ super().__init__()
150
+ self.register_modules(
151
+ vae=vae,
152
+ text_encoder=text_encoder,
153
+ tokenizer=tokenizer,
154
+ unet=unet,
155
+ scheduler=scheduler,
156
+ safety_checker=safety_checker,
157
+ feature_extractor=feature_extractor,
158
+ )
159
+
160
+ class SeedTilesMode(Enum):
161
+ """Modes in which the latents of a particular tile can be re-seeded"""
162
+
163
+ FULL = "full"
164
+ EXCLUSIVE = "exclusive"
165
+
166
+ @torch.no_grad()
167
+ def __call__(
168
+ self,
169
+ prompt: Union[str, List[List[str]]],
170
+ num_inference_steps: Optional[int] = 50,
171
+ guidance_scale: Optional[float] = 7.5,
172
+ eta: Optional[float] = 0.0,
173
+ seed: Optional[int] = None,
174
+ tile_height: Optional[int] = 512,
175
+ tile_width: Optional[int] = 512,
176
+ tile_row_overlap: Optional[int] = 256,
177
+ tile_col_overlap: Optional[int] = 256,
178
+ guidance_scale_tiles: Optional[List[List[float]]] = None,
179
+ seed_tiles: Optional[List[List[int]]] = None,
180
+ seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
181
+ seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
182
+ cpu_vae: Optional[bool] = False,
183
+ ):
184
+ r"""
185
+ Function to run the diffusion pipeline with tiling support.
186
+
187
+ Args:
188
+ prompt: either a single string (no tiling) or a list of lists with all the prompts to use (one list for each row of tiles). This will also define the tiling structure.
189
+ num_inference_steps: number of diffusions steps.
190
+ guidance_scale: classifier-free guidance.
191
+ seed: general random seed to initialize latents.
192
+ tile_height: height in pixels of each grid tile.
193
+ tile_width: width in pixels of each grid tile.
194
+ tile_row_overlap: number of overlap pixels between tiles in consecutive rows.
195
+ tile_col_overlap: number of overlap pixels between tiles in consecutive columns.
196
+ guidance_scale_tiles: specific weights for classifier-free guidance in each tile. If None, the value provided in guidance_scale will be used.
198
+ seed_tiles: specific seeds for the initialization latents in each tile. These will override the latents generated for the whole canvas using the standard seed parameter.
199
+ seed_tiles_mode: either "full" or "exclusive". If "full", all the latents affected by the tile will be overridden. If "exclusive", only the latents that are affected exclusively by this tile (and no other tiles) will be overridden.
200
+ seed_reroll_regions: a list of tuples in the form (start row, end row, start column, end column, seed) defining regions in pixel space for which the latents will be overridden using the given seed. Takes priority over seed_tiles.
201
+ cpu_vae: the decoder from latent space to pixel space can require too much GPU RAM for large images. If you find out-of-memory errors at the end of the generation process, try setting this parameter to True to run the decoder on CPU. Slower, but should run without memory issues.
202
+
203
+ Examples:
204
+
205
+ Returns:
206
+ A PIL image with the generated image.
207
+
208
+ """
209
+ if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt):
210
+ raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}")
211
+ grid_rows = len(prompt)
212
+ grid_cols = len(prompt[0])
213
+ if not all(len(row) == grid_cols for row in prompt):
214
+ raise ValueError("All prompt rows must have the same number of prompt columns")
215
+ if not isinstance(seed_tiles_mode, str) and (
216
+ not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode)
217
+ ):
218
+ raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(prompt)}")
219
+ if isinstance(seed_tiles_mode, str):
220
+ seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]
221
+
222
+ modes = [mode.value for mode in self.SeedTilesMode]
223
+ if any(mode not in modes for row in seed_tiles_mode for mode in row):
224
+ raise ValueError(f"Seed tiles mode must be one of {modes}")
225
+ if seed_reroll_regions is None:
226
+ seed_reroll_regions = []
227
+ batch_size = 1
228
+
229
+ # create original noisy latents using the timesteps
230
+ height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
231
+ width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
232
+ latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
233
+ generator = torch.Generator("cuda").manual_seed(seed)
234
+ latents = torch.randn(latents_shape, generator=generator, device=self.device)
235
+
236
+ # overwrite latents for specific tiles if provided
237
+ if seed_tiles is not None:
238
+ for row in range(grid_rows):
239
+ for col in range(grid_cols):
240
+ if (seed_tile := seed_tiles[row][col]) is not None:
241
+ mode = seed_tiles_mode[row][col]
242
+ if mode == self.SeedTilesMode.FULL.value:
243
+ row_init, row_end, col_init, col_end = _tile2latent_indices(
244
+ row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
245
+ )
246
+ else:
247
+ row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(
248
+ row,
249
+ col,
250
+ tile_width,
251
+ tile_height,
252
+ tile_row_overlap,
253
+ tile_col_overlap,
254
+ grid_rows,
255
+ grid_cols,
256
+ )
257
+ tile_generator = torch.Generator("cuda").manual_seed(seed_tile)
258
+ tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
259
+ latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
260
+ tile_shape, generator=tile_generator, device=self.device
261
+ )
262
+
263
+ # overwrite again for seed reroll regions
264
+ for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
265
+ row_init, row_end, col_init, col_end = _pixel2latent_indices(
266
+ row_init, row_end, col_init, col_end
267
+ ) # to latent space coordinates
268
+ reroll_generator = torch.Generator("cuda").manual_seed(seed_reroll)
269
+ region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
270
+ latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
271
+ region_shape, generator=reroll_generator, device=self.device
272
+ )
273
+
274
+ # Prepare scheduler
275
+ accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
276
+ extra_set_kwargs = {}
277
+ if accepts_offset:
278
+ extra_set_kwargs["offset"] = 1
279
+ self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
280
+ # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
281
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
282
+ latents = latents * self.scheduler.sigmas[0]
283
+
284
+ # get prompts text embeddings
285
+ text_input = [
286
+ [
287
+ self.tokenizer(
288
+ col,
289
+ padding="max_length",
290
+ max_length=self.tokenizer.model_max_length,
291
+ truncation=True,
292
+ return_tensors="pt",
293
+ )
294
+ for col in row
295
+ ]
296
+ for row in prompt
297
+ ]
298
+ text_embeddings = [[self.text_encoder(col.input_ids.to(self.device))[0] for col in row] for row in text_input]
299
+
300
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
301
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
302
+ # corresponds to doing no classifier free guidance.
303
+ do_classifier_free_guidance = guidance_scale > 1.0 # TODO: also active if any tile has guidance scale
304
+ # get unconditional embeddings for classifier free guidance
305
+ if do_classifier_free_guidance:
306
+ for i in range(grid_rows):
307
+ for j in range(grid_cols):
308
+ max_length = text_input[i][j].input_ids.shape[-1]
309
+ uncond_input = self.tokenizer(
310
+ [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
311
+ )
312
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
313
+
314
+ # For classifier free guidance, we need to do two forward passes.
315
+ # Here we concatenate the unconditional and text embeddings into a single batch
316
+ # to avoid doing two forward passes
317
+ text_embeddings[i][j] = torch.cat([uncond_embeddings, text_embeddings[i][j]])
318
+
319
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
320
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
321
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
322
+ # and should be between [0, 1]
323
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
324
+ extra_step_kwargs = {}
325
+ if accepts_eta:
326
+ extra_step_kwargs["eta"] = eta
327
+
328
+ # Mask for tile weight strength
329
+ tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size)
330
+
331
+ # Diffusion timesteps
332
+ for i, t in tqdm(enumerate(self.scheduler.timesteps)):
333
+ # Diffuse each tile
334
+ noise_preds = []
335
+ for row in range(grid_rows):
336
+ noise_preds_row = []
337
+ for col in range(grid_cols):
338
+ px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
339
+ row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
340
+ )
341
+ tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
342
+ # expand the latents if we are doing classifier free guidance
343
+ latent_model_input = torch.cat([tile_latents] * 2) if do_classifier_free_guidance else tile_latents
344
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
345
+ # predict the noise residual
346
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings[row][col])[
347
+ "sample"
348
+ ]
349
+ # perform guidance
350
+ if do_classifier_free_guidance:
351
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
352
+ guidance = (
353
+ guidance_scale
354
+ if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None
355
+ else guidance_scale_tiles[row][col]
356
+ )
357
+ noise_pred_tile = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
358
+ noise_preds_row.append(noise_pred_tile)
359
+ noise_preds.append(noise_preds_row)
360
+ # Stitch noise predictions for all tiles
361
+ noise_pred = torch.zeros(latents.shape, device=self.device)
362
+ contributors = torch.zeros(latents.shape, device=self.device)
363
+ # Add each tile contribution to overall latents
364
+ for row in range(grid_rows):
365
+ for col in range(grid_cols):
366
+ px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
367
+ row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
368
+ )
369
+ noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
370
+ noise_preds[row][col] * tile_weights
371
+ )
372
+ contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights
373
+ # Average overlapping areas with more than 1 contributor
374
+ noise_pred /= contributors
375
+
376
+ # compute the previous noisy sample x_t -> x_t-1
377
+ latents = self.scheduler.step(noise_pred, t, latents).prev_sample
378
+
379
+ # scale and decode the image latents with vae
380
+ image = self.decode_latents(latents, cpu_vae)
381
+
382
+ return {"images": image}
383
+
384
+ def _gaussian_weights(self, tile_width, tile_height, nbatches):
385
+ """Generates a gaussian mask of weights for tile contributions"""
386
+ import numpy as np
387
+ from numpy import exp, pi, sqrt
388
+
389
+ latent_width = tile_width // 8
390
+ latent_height = tile_height // 8
391
+
392
+ var = 0.01
393
+ midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1
394
+ x_probs = [
395
+ exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
396
+ for x in range(latent_width)
397
+ ]
398
+ midpoint = latent_height / 2
399
+ y_probs = [
400
+ exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
401
+ for y in range(latent_height)
402
+ ]
403
+
404
+ weights = np.outer(y_probs, x_probs)
405
+ return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))
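For reference, the separable mask built by `_gaussian_weights` above (before it is tiled across the batch and channel dimensions) corresponds to, with $w$ and $h$ the latent tile width and height and $\mathrm{var} = 0.01$:

$$
W[y, x] \;=\; \frac{1}{2\pi\,\mathrm{var}}\,
\exp\!\left(-\frac{\bigl(x - \tfrac{w-1}{2}\bigr)^{2}}{2\,\mathrm{var}\,w^{2}}\right)
\exp\!\left(-\frac{\bigl(y - \tfrac{h}{2}\bigr)^{2}}{2\,\mathrm{var}\,h^{2}}\right),
\qquad 0 \le x < w,\ 0 \le y < h,
$$

i.e. a 2-D Gaussian centred on the tile whose spread scales with the tile size, so latents near a tile border contribute less than latents near its centre when overlapping tiles are averaged in the stitching step above.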
v0.24.0/multilingual_stable_diffusion.py ADDED
@@ -0,0 +1,437 @@
+ import inspect
2
+ from typing import Callable, List, Optional, Union
3
+
4
+ import torch
5
+ from transformers import (
6
+ CLIPImageProcessor,
7
+ CLIPTextModel,
8
+ CLIPTokenizer,
9
+ MBart50TokenizerFast,
10
+ MBartForConditionalGeneration,
11
+ pipeline,
12
+ )
13
+
14
+ from diffusers import DiffusionPipeline
15
+ from diffusers.configuration_utils import FrozenDict
16
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
17
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
18
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
19
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
20
+ from diffusers.utils import deprecate, logging
21
+
22
+
23
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
24
+
25
+
26
+ def detect_language(pipe, prompt, batch_size):
27
+ """helper function to detect language(s) of prompt"""
28
+
29
+ if batch_size == 1:
30
+ preds = pipe(prompt, top_k=1, truncation=True, max_length=128)
31
+ return preds[0]["label"]
32
+ else:
33
+ detected_languages = []
34
+ for p in prompt:
35
+ preds = pipe(p, top_k=1, truncation=True, max_length=128)
36
+ detected_languages.append(preds[0]["label"])
37
+
38
+ return detected_languages
39
+
40
+
41
+ def translate_prompt(prompt, translation_tokenizer, translation_model, device):
42
+ """helper function to translate prompt to English"""
43
+
44
+ encoded_prompt = translation_tokenizer(prompt, return_tensors="pt").to(device)
45
+ generated_tokens = translation_model.generate(**encoded_prompt, max_new_tokens=1000)
46
+ en_trans = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
47
+
48
+ return en_trans[0]
49
+
50
+
51
+ class MultilingualStableDiffusion(DiffusionPipeline):
52
+ r"""
53
+ Pipeline for text-to-image generation using Stable Diffusion in different languages.
54
+
55
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
56
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
57
+
58
+ Args:
59
+ detection_pipeline ([`pipeline`]):
60
+ Transformers pipeline to detect prompt's language.
61
+ translation_model ([`MBartForConditionalGeneration`]):
62
+ Model to translate prompt to English, if necessary. Please refer to the
63
+ [model card](https://huggingface.co/docs/transformers/model_doc/mbart) for details.
64
+ translation_tokenizer ([`MBart50TokenizerFast`]):
65
+ Tokenizer of the translation model.
66
+ vae ([`AutoencoderKL`]):
67
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
68
+ text_encoder ([`CLIPTextModel`]):
69
+ Frozen text-encoder. Stable Diffusion uses the text portion of
70
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
71
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
72
+ tokenizer (`CLIPTokenizer`):
73
+ Tokenizer of class
74
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
75
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
76
+ scheduler ([`SchedulerMixin`]):
77
+ A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of
78
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
79
+ safety_checker ([`StableDiffusionSafetyChecker`]):
80
+ Classification module that estimates whether generated images could be considered offensive or harmful.
81
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
82
+ feature_extractor ([`CLIPImageProcessor`]):
83
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
84
+ """
85
+
86
+ def __init__(
87
+ self,
88
+ detection_pipeline: pipeline,
89
+ translation_model: MBartForConditionalGeneration,
90
+ translation_tokenizer: MBart50TokenizerFast,
91
+ vae: AutoencoderKL,
92
+ text_encoder: CLIPTextModel,
93
+ tokenizer: CLIPTokenizer,
94
+ unet: UNet2DConditionModel,
95
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
96
+ safety_checker: StableDiffusionSafetyChecker,
97
+ feature_extractor: CLIPImageProcessor,
98
+ ):
99
+ super().__init__()
100
+
101
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
102
+ deprecation_message = (
103
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
104
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
105
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
106
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
107
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
108
+ " file"
109
+ )
110
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
111
+ new_config = dict(scheduler.config)
112
+ new_config["steps_offset"] = 1
113
+ scheduler._internal_dict = FrozenDict(new_config)
114
+
115
+ if safety_checker is None:
116
+ logger.warning(
117
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
118
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
119
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
120
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
121
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
122
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
123
+ )
124
+
125
+ self.register_modules(
126
+ detection_pipeline=detection_pipeline,
127
+ translation_model=translation_model,
128
+ translation_tokenizer=translation_tokenizer,
129
+ vae=vae,
130
+ text_encoder=text_encoder,
131
+ tokenizer=tokenizer,
132
+ unet=unet,
133
+ scheduler=scheduler,
134
+ safety_checker=safety_checker,
135
+ feature_extractor=feature_extractor,
136
+ )
137
+
138
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
139
+ r"""
140
+ Enable sliced attention computation.
141
+
142
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
143
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
144
+
145
+ Args:
146
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
147
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
148
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
149
+ `attention_head_dim` must be a multiple of `slice_size`.
150
+ """
151
+ if slice_size == "auto":
152
+ # half the attention head size is usually a good trade-off between
153
+ # speed and memory
154
+ slice_size = self.unet.config.attention_head_dim // 2
155
+ self.unet.set_attention_slice(slice_size)
156
+
157
+ def disable_attention_slicing(self):
158
+ r"""
159
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
160
+ back to computing attention in one step.
161
+ """
162
+ # set slice_size = `None` to disable `attention slicing`
163
+ self.enable_attention_slicing(None)
164
+
165
+ @torch.no_grad()
166
+ def __call__(
167
+ self,
168
+ prompt: Union[str, List[str]],
169
+ height: int = 512,
170
+ width: int = 512,
171
+ num_inference_steps: int = 50,
172
+ guidance_scale: float = 7.5,
173
+ negative_prompt: Optional[Union[str, List[str]]] = None,
174
+ num_images_per_prompt: Optional[int] = 1,
175
+ eta: float = 0.0,
176
+ generator: Optional[torch.Generator] = None,
177
+ latents: Optional[torch.FloatTensor] = None,
178
+ output_type: Optional[str] = "pil",
179
+ return_dict: bool = True,
180
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
181
+ callback_steps: int = 1,
182
+ **kwargs,
183
+ ):
184
+ r"""
185
+ Function invoked when calling the pipeline for generation.
186
+
187
+ Args:
188
+ prompt (`str` or `List[str]`):
189
+ The prompt or prompts to guide the image generation. Can be in different languages.
190
+ height (`int`, *optional*, defaults to 512):
191
+ The height in pixels of the generated image.
192
+ width (`int`, *optional*, defaults to 512):
193
+ The width in pixels of the generated image.
194
+ num_inference_steps (`int`, *optional*, defaults to 50):
195
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
196
+ expense of slower inference.
197
+ guidance_scale (`float`, *optional*, defaults to 7.5):
198
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
199
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
200
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
201
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
202
+ usually at the expense of lower image quality.
203
+ negative_prompt (`str` or `List[str]`, *optional*):
204
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
205
+ if `guidance_scale` is less than `1`).
206
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
207
+ The number of images to generate per prompt.
208
+ eta (`float`, *optional*, defaults to 0.0):
209
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
210
+ [`schedulers.DDIMScheduler`], will be ignored for others.
211
+ generator (`torch.Generator`, *optional*):
212
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
213
+ deterministic.
214
+ latents (`torch.FloatTensor`, *optional*):
215
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
216
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
217
+ tensor will be generated by sampling using the supplied random `generator`.
218
+ output_type (`str`, *optional*, defaults to `"pil"`):
219
+ The output format of the generate image. Choose between
220
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
221
+ return_dict (`bool`, *optional*, defaults to `True`):
222
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
223
+ plain tuple.
224
+ callback (`Callable`, *optional*):
225
+ A function that will be called every `callback_steps` steps during inference. The function will be
226
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
227
+ callback_steps (`int`, *optional*, defaults to 1):
228
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
229
+ called at every step.
230
+
231
+ Returns:
232
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
233
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
234
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
235
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
236
+ (nsfw) content, according to the `safety_checker`.
237
+ """
238
+ if isinstance(prompt, str):
239
+ batch_size = 1
240
+ elif isinstance(prompt, list):
241
+ batch_size = len(prompt)
242
+ else:
243
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
244
+
245
+ if height % 8 != 0 or width % 8 != 0:
246
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
247
+
248
+ if (callback_steps is None) or (
249
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
250
+ ):
251
+ raise ValueError(
252
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
253
+ f" {type(callback_steps)}."
254
+ )
255
+
256
+ # detect language and translate if necessary
257
+ prompt_language = detect_language(self.detection_pipeline, prompt, batch_size)
258
+ if batch_size == 1 and prompt_language != "en":
259
+ prompt = translate_prompt(prompt, self.translation_tokenizer, self.translation_model, self.device)
260
+
261
+ if isinstance(prompt, list):
262
+ for index in range(batch_size):
263
+ if prompt_language[index] != "en":
264
+ p = translate_prompt(
265
+ prompt[index], self.translation_tokenizer, self.translation_model, self.device
266
+ )
267
+ prompt[index] = p
268
+
269
+ # get prompt text embeddings
270
+ text_inputs = self.tokenizer(
271
+ prompt,
272
+ padding="max_length",
273
+ max_length=self.tokenizer.model_max_length,
274
+ return_tensors="pt",
275
+ )
276
+ text_input_ids = text_inputs.input_ids
277
+
278
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
279
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
280
+ logger.warning(
281
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
282
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
283
+ )
284
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
285
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
286
+
287
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
288
+ bs_embed, seq_len, _ = text_embeddings.shape
289
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
290
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
291
+
292
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
293
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
294
+ # corresponds to doing no classifier free guidance.
295
+ do_classifier_free_guidance = guidance_scale > 1.0
296
+ # get unconditional embeddings for classifier free guidance
297
+ if do_classifier_free_guidance:
298
+ uncond_tokens: List[str]
299
+ if negative_prompt is None:
300
+ uncond_tokens = [""] * batch_size
301
+ elif type(prompt) is not type(negative_prompt):
302
+ raise TypeError(
303
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
304
+ f" {type(prompt)}."
305
+ )
306
+ elif isinstance(negative_prompt, str):
307
+ # detect language and translate it if necessary
308
+ negative_prompt_language = detect_language(self.detection_pipeline, negative_prompt, batch_size)
309
+ if negative_prompt_language != "en":
310
+ negative_prompt = translate_prompt(
311
+ negative_prompt, self.translation_tokenizer, self.translation_model, self.device
312
+ )
313
+ if isinstance(negative_prompt, str):
314
+ uncond_tokens = [negative_prompt]
315
+ elif batch_size != len(negative_prompt):
316
+ raise ValueError(
317
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
318
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
319
+ " the batch size of `prompt`."
320
+ )
321
+ else:
322
+ # detect language and translate it if necessary
323
+ if isinstance(negative_prompt, list):
324
+ negative_prompt_languages = detect_language(self.detection_pipeline, negative_prompt, batch_size)
325
+ for index in range(batch_size):
326
+ if negative_prompt_languages[index] != "en":
327
+ p = translate_prompt(
328
+ negative_prompt[index], self.translation_tokenizer, self.translation_model, self.device
329
+ )
330
+ negative_prompt[index] = p
331
+ uncond_tokens = negative_prompt
332
+
333
+ max_length = text_input_ids.shape[-1]
334
+ uncond_input = self.tokenizer(
335
+ uncond_tokens,
336
+ padding="max_length",
337
+ max_length=max_length,
338
+ truncation=True,
339
+ return_tensors="pt",
340
+ )
341
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
342
+
343
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
344
+ seq_len = uncond_embeddings.shape[1]
345
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
346
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
347
+
348
+ # For classifier free guidance, we need to do two forward passes.
349
+ # Here we concatenate the unconditional and text embeddings into a single batch
350
+ # to avoid doing two forward passes
351
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
352
+
353
+ # get the initial random noise unless the user supplied it
354
+
355
+ # Unlike in other pipelines, latents need to be generated in the target device
356
+ # for 1-to-1 results reproducibility with the CompVis implementation.
357
+ # However this currently doesn't work in `mps`.
358
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
359
+ latents_dtype = text_embeddings.dtype
360
+ if latents is None:
361
+ if self.device.type == "mps":
362
+ # randn does not work reproducibly on mps
363
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
364
+ self.device
365
+ )
366
+ else:
367
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
368
+ else:
369
+ if latents.shape != latents_shape:
370
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
371
+ latents = latents.to(self.device)
372
+
373
+ # set timesteps
374
+ self.scheduler.set_timesteps(num_inference_steps)
375
+
376
+ # Some schedulers like PNDM have timesteps as arrays
377
+ # It's more optimized to move all timesteps to correct device beforehand
378
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
379
+
380
+ # scale the initial noise by the standard deviation required by the scheduler
381
+ latents = latents * self.scheduler.init_noise_sigma
382
+
383
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
384
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
385
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
386
+ # and should be between [0, 1]
387
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
388
+ extra_step_kwargs = {}
389
+ if accepts_eta:
390
+ extra_step_kwargs["eta"] = eta
391
+
392
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
393
+ # expand the latents if we are doing classifier free guidance
394
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
395
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
396
+
397
+ # predict the noise residual
398
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
399
+
400
+ # perform guidance
401
+ if do_classifier_free_guidance:
402
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
403
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
404
+
405
+ # compute the previous noisy sample x_t -> x_t-1
406
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
407
+
408
+ # call the callback, if provided
409
+ if callback is not None and i % callback_steps == 0:
410
+ step_idx = i // getattr(self.scheduler, "order", 1)
411
+ callback(step_idx, t, latents)
412
+
413
+ latents = 1 / 0.18215 * latents
414
+ image = self.vae.decode(latents).sample
415
+
416
+ image = (image / 2 + 0.5).clamp(0, 1)
417
+
418
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
419
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
420
+
421
+ if self.safety_checker is not None:
422
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
423
+ self.device
424
+ )
425
+ image, has_nsfw_concept = self.safety_checker(
426
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
427
+ )
428
+ else:
429
+ has_nsfw_concept = None
430
+
431
+ if output_type == "pil":
432
+ image = self.numpy_to_pil(image)
433
+
434
+ if not return_dict:
435
+ return (image, has_nsfw_concept)
436
+
437
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
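A hedged end-to-end sketch of wiring up the multilingual pipeline above; the `multilingual_stable_diffusion` custom-pipeline name and the detection/translation checkpoints are illustrative assumptions, while the `detection_pipeline`, `translation_model`, and `translation_tokenizer` arguments come straight from the `__init__` signature shown in this diff:

```py
# Assumed checkpoints for language detection and many-to-English translation; any compatible
# text-classification pipeline and MBart-50 translation model should work the same way.
import torch
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration, pipeline
from diffusers import DiffusionPipeline

language_detector = pipeline(
    "text-classification",
    model="papluca/xlm-roberta-base-language-detection",  # assumed detection checkpoint
    device=0,
)
trans_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
trans_model = MBartForConditionalGeneration.from_pretrained(
    "facebook/mbart-large-50-many-to-one-mmt"
).to("cuda")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="multilingual_stable_diffusion",  # assumed custom pipeline name
    detection_pipeline=language_detector,
    translation_model=trans_model,
    translation_tokenizer=trans_tokenizer,
    torch_dtype=torch.float16,
).to("cuda")

# Non-English prompts are detected and translated to English before text encoding.
image = pipe("Una casa en la playa", num_inference_steps=50).images[0]
```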
v0.24.0/one_step_unet.py ADDED
@@ -0,0 +1,24 @@
+ #!/usr/bin/env python3
+ import torch
+
+ from diffusers import DiffusionPipeline
+
+
+ class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
+ def __init__(self, unet, scheduler):
+ super().__init__()
+
+ self.register_modules(unet=unet, scheduler=scheduler)
+
+ def __call__(self):
+ image = torch.randn(
+ (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
+ )
+ timestep = 1
+
+ model_output = self.unet(image, timestep).sample
+ scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
+
+ result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
+
+ return result
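As a quick sanity check, the toy pipeline above can be driven by any unconditional UNet/scheduler pair; the checkpoint below and the `one_step_unet` custom-pipeline name are assumptions used for illustration:

```py
# Minimal sketch: one UNet forward pass plus one scheduler step through the pipeline above.
from diffusers import DDPMPipeline

base = DDPMPipeline.from_pretrained("google/ddpm-cifar10-32")  # assumed unconditional checkpoint
pipe = UnetSchedulerOneForwardPipeline(unet=base.unet, scheduler=base.scheduler)

out = pipe()  # a tensor of ones with the shape of the scheduler output
print(out.shape)

# The same class can presumably also be loaded by name as a community pipeline:
# pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
```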
v0.24.0/pipeline_fabric.py ADDED
@@ -0,0 +1,751 @@
+ # Copyright 2023 FABRIC authors and the HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import List, Optional, Union
15
+
16
+ import torch
17
+ from packaging import version
18
+ from PIL import Image
19
+ from transformers import CLIPTextModel, CLIPTokenizer
20
+
21
+ from diffusers import AutoencoderKL, UNet2DConditionModel
22
+ from diffusers.configuration_utils import FrozenDict
23
+ from diffusers.image_processor import VaeImageProcessor
24
+ from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
25
+ from diffusers.models.attention import BasicTransformerBlock
26
+ from diffusers.models.attention_processor import LoRAAttnProcessor
27
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
28
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
29
+ from diffusers.schedulers import EulerAncestralDiscreteScheduler, KarrasDiffusionSchedulers
30
+ from diffusers.utils import (
31
+ deprecate,
32
+ logging,
33
+ replace_example_docstring,
34
+ )
35
+ from diffusers.utils.torch_utils import randn_tensor
36
+
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+ EXAMPLE_DOC_STRING = """
41
+ Examples:
42
+ ```py
43
+ >>> from diffusers import DiffusionPipeline
44
+ >>> import torch
45
+
46
+ >>> model_id = "dreamlike-art/dreamlike-photoreal-2.0"
47
+ >>> pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, custom_pipeline="pipeline_fabric")
48
+ >>> pipe = pipe.to("cuda")
49
+ >>> prompt = "a giant standing in a fantasy landscape best quality"
50
+ >>> liked = [] # list of images for positive feedback
51
+ >>> disliked = [] # list of images for negative feedback
52
+ >>> image = pipe(prompt, num_images=4, liked=liked, disliked=disliked).images[0]
53
+ ```
54
+ """
55
+
56
+
57
+ class FabricCrossAttnProcessor:
58
+ def __init__(self):
59
+ self.attention_probs = None
60
+
61
+ def __call__(
62
+ self,
63
+ attn,
64
+ hidden_states,
65
+ encoder_hidden_states=None,
66
+ attention_mask=None,
67
+ weights=None,
68
+ lora_scale=1.0,
69
+ ):
70
+ batch_size, sequence_length, _ = (
71
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
72
+ )
73
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
74
+
75
+ if isinstance(attn.processor, LoRAAttnProcessor):
76
+ query = attn.to_q(hidden_states) + lora_scale * attn.processor.to_q_lora(hidden_states)
77
+ else:
78
+ query = attn.to_q(hidden_states)
79
+
80
+ if encoder_hidden_states is None:
81
+ encoder_hidden_states = hidden_states
82
+ elif attn.norm_cross:
83
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
84
+
85
+ if isinstance(attn.processor, LoRAAttnProcessor):
86
+ key = attn.to_k(encoder_hidden_states) + lora_scale * attn.processor.to_k_lora(encoder_hidden_states)
87
+ value = attn.to_v(encoder_hidden_states) + lora_scale * attn.processor.to_v_lora(encoder_hidden_states)
88
+ else:
89
+ key = attn.to_k(encoder_hidden_states)
90
+ value = attn.to_v(encoder_hidden_states)
91
+
92
+ query = attn.head_to_batch_dim(query)
93
+ key = attn.head_to_batch_dim(key)
94
+ value = attn.head_to_batch_dim(value)
95
+
96
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
97
+
98
+ if weights is not None:
99
+ if weights.shape[0] != 1:
100
+ weights = weights.repeat_interleave(attn.heads, dim=0)
101
+ attention_probs = attention_probs * weights[:, None]
102
+ attention_probs = attention_probs / attention_probs.sum(dim=-1, keepdim=True)
103
+
104
+ hidden_states = torch.bmm(attention_probs, value)
105
+ hidden_states = attn.batch_to_head_dim(hidden_states)
106
+
107
+ # linear proj
108
+ if isinstance(attn.processor, LoRAAttnProcessor):
109
+ hidden_states = attn.to_out[0](hidden_states) + lora_scale * attn.processor.to_out_lora(hidden_states)
110
+ else:
111
+ hidden_states = attn.to_out[0](hidden_states)
112
+ # dropout
113
+ hidden_states = attn.to_out[1](hidden_states)
114
+
115
+ return hidden_states
116
+
117
+
118
+ class FabricPipeline(DiffusionPipeline):
119
+ r"""
120
+ Pipeline for text-to-image generation using Stable Diffusion and conditioning the results using feedback images.
121
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
122
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
123
+
124
+ Args:
125
+ vae ([`AutoencoderKL`]):
126
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
127
+ text_encoder ([`~transformers.CLIPTextModel`]):
128
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
129
+ tokenizer ([`~transformers.CLIPTokenizer`]):
130
+ A `CLIPTokenizer` to tokenize text.
131
+ unet ([`UNet2DConditionModel`]):
132
+ A `UNet2DConditionModel` to denoise the encoded image latents.
133
+ scheduler ([`EulerAncestralDiscreteScheduler`]):
134
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
135
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
136
+ safety_checker ([`StableDiffusionSafetyChecker`]):
137
+ Classification module that estimates whether generated images could be considered offensive or harmful.
138
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
139
+ about a model's potential harms.
140
+ """
141
+
142
+ def __init__(
143
+ self,
144
+ vae: AutoencoderKL,
145
+ text_encoder: CLIPTextModel,
146
+ tokenizer: CLIPTokenizer,
147
+ unet: UNet2DConditionModel,
148
+ scheduler: KarrasDiffusionSchedulers,
149
+ requires_safety_checker: bool = True,
150
+ ):
151
+ super().__init__()
152
+
153
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
154
+ version.parse(unet.config._diffusers_version).base_version
155
+ ) < version.parse("0.9.0.dev0")
156
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
157
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
158
+ deprecation_message = (
159
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
160
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
161
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
162
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
163
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
164
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
165
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
166
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
167
+ " the `unet/config.json` file"
168
+ )
169
+
170
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
171
+ new_config = dict(unet.config)
172
+ new_config["sample_size"] = 64
173
+ unet._internal_dict = FrozenDict(new_config)
174
+
175
+ self.register_modules(
176
+ unet=unet,
177
+ vae=vae,
178
+ text_encoder=text_encoder,
179
+ tokenizer=tokenizer,
180
+ scheduler=scheduler,
181
+ )
182
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
183
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
184
+
185
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
186
+ def _encode_prompt(
187
+ self,
188
+ prompt,
189
+ device,
190
+ num_images_per_prompt,
191
+ do_classifier_free_guidance,
192
+ negative_prompt=None,
193
+ prompt_embeds: Optional[torch.FloatTensor] = None,
194
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
195
+ lora_scale: Optional[float] = None,
196
+ ):
197
+ r"""
198
+ Encodes the prompt into text encoder hidden states.
199
+
200
+ Args:
201
+ prompt (`str` or `List[str]`, *optional*):
202
+ prompt to be encoded
203
+ device: (`torch.device`):
204
+ torch device
205
+ num_images_per_prompt (`int`):
206
+ number of images that should be generated per prompt
207
+ do_classifier_free_guidance (`bool`):
208
+ whether to use classifier free guidance or not
209
+ negative_prompt (`str` or `List[str]`, *optional*):
210
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
211
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
212
+ less than `1`).
213
+ prompt_embeds (`torch.FloatTensor`, *optional*):
214
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
215
+ provided, text embeddings will be generated from `prompt` input argument.
216
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
217
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
218
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
219
+ argument.
220
+ lora_scale (`float`, *optional*):
221
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
222
+ """
223
+ # set lora scale so that monkey patched LoRA
224
+ # function of text encoder can correctly access it
225
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
226
+ self._lora_scale = lora_scale
227
+
228
+ if prompt is not None and isinstance(prompt, str):
229
+ batch_size = 1
230
+ elif prompt is not None and isinstance(prompt, list):
231
+ batch_size = len(prompt)
232
+ else:
233
+ batch_size = prompt_embeds.shape[0]
234
+
235
+ if prompt_embeds is None:
236
+ # textual inversion: process multi-vector tokens if necessary
237
+ if isinstance(self, TextualInversionLoaderMixin):
238
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
239
+
240
+ text_inputs = self.tokenizer(
241
+ prompt,
242
+ padding="max_length",
243
+ max_length=self.tokenizer.model_max_length,
244
+ truncation=True,
245
+ return_tensors="pt",
246
+ )
247
+ text_input_ids = text_inputs.input_ids
248
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
249
+
250
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
251
+ text_input_ids, untruncated_ids
252
+ ):
253
+ removed_text = self.tokenizer.batch_decode(
254
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
255
+ )
256
+ logger.warning(
257
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
258
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
259
+ )
260
+
261
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
262
+ attention_mask = text_inputs.attention_mask.to(device)
263
+ else:
264
+ attention_mask = None
265
+
266
+ prompt_embeds = self.text_encoder(
267
+ text_input_ids.to(device),
268
+ attention_mask=attention_mask,
269
+ )
270
+ prompt_embeds = prompt_embeds[0]
271
+
272
+ if self.text_encoder is not None:
273
+ prompt_embeds_dtype = self.text_encoder.dtype
274
+ elif self.unet is not None:
275
+ prompt_embeds_dtype = self.unet.dtype
276
+ else:
277
+ prompt_embeds_dtype = prompt_embeds.dtype
278
+
279
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
280
+
281
+ bs_embed, seq_len, _ = prompt_embeds.shape
282
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
283
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
284
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
285
+
286
+ # get unconditional embeddings for classifier free guidance
287
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
288
+ uncond_tokens: List[str]
289
+ if negative_prompt is None:
290
+ uncond_tokens = [""] * batch_size
291
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
292
+ raise TypeError(
293
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
294
+ f" {type(prompt)}."
295
+ )
296
+ elif isinstance(negative_prompt, str):
297
+ uncond_tokens = [negative_prompt]
298
+ elif batch_size != len(negative_prompt):
299
+ raise ValueError(
300
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
301
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
302
+ " the batch size of `prompt`."
303
+ )
304
+ else:
305
+ uncond_tokens = negative_prompt
306
+
307
+ # textual inversion: process multi-vector tokens if necessary
308
+ if isinstance(self, TextualInversionLoaderMixin):
309
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
310
+
311
+ max_length = prompt_embeds.shape[1]
312
+ uncond_input = self.tokenizer(
313
+ uncond_tokens,
314
+ padding="max_length",
315
+ max_length=max_length,
316
+ truncation=True,
317
+ return_tensors="pt",
318
+ )
319
+
320
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
321
+ attention_mask = uncond_input.attention_mask.to(device)
322
+ else:
323
+ attention_mask = None
324
+
325
+ negative_prompt_embeds = self.text_encoder(
326
+ uncond_input.input_ids.to(device),
327
+ attention_mask=attention_mask,
328
+ )
329
+ negative_prompt_embeds = negative_prompt_embeds[0]
330
+
331
+ if do_classifier_free_guidance:
332
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
333
+ seq_len = negative_prompt_embeds.shape[1]
334
+
335
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
336
+
337
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
338
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
339
+
340
+ # For classifier free guidance, we need to do two forward passes.
341
+ # Here we concatenate the unconditional and text embeddings into a single batch
342
+ # to avoid doing two forward passes
343
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
344
+
345
+ return prompt_embeds
346
+
347
+ def get_unet_hidden_states(self, z_all, t, prompt_embd):
348
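+ # Descriptive note: this helper temporarily monkey-patches `attn1.forward` of every
+ # BasicTransformerBlock so that a single UNet forward pass records the self-attention
+ # hidden states of the feedback latents; the original forward methods are restored below.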
+ cached_hidden_states = []
349
+ for module in self.unet.modules():
350
+ if isinstance(module, BasicTransformerBlock):
351
+
352
+ def new_forward(self, hidden_states, *args, **kwargs):
353
+ cached_hidden_states.append(hidden_states.clone().detach().cpu())
354
+ return self.old_forward(hidden_states, *args, **kwargs)
355
+
356
+ module.attn1.old_forward = module.attn1.forward
357
+ module.attn1.forward = new_forward.__get__(module.attn1)
358
+
359
+ # run forward pass to cache hidden states, output can be discarded
360
+ _ = self.unet(z_all, t, encoder_hidden_states=prompt_embd)
361
+
362
+ # restore original forward pass
363
+ for module in self.unet.modules():
364
+ if isinstance(module, BasicTransformerBlock):
365
+ module.attn1.forward = module.attn1.old_forward
366
+ del module.attn1.old_forward
367
+
368
+ return cached_hidden_states
369
+
370
+ def unet_forward_with_cached_hidden_states(
371
+ self,
372
+ z_all,
373
+ t,
374
+ prompt_embd,
375
+ cached_pos_hiddens: Optional[List[torch.Tensor]] = None,
376
+ cached_neg_hiddens: Optional[List[torch.Tensor]] = None,
377
+ pos_weights=(0.8, 0.8),
378
+ neg_weights=(0.5, 0.5),
379
+ ):
380
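+ # Descriptive note: re-runs the UNet while patching each self-attention layer to attend
+ # over the cached feedback hidden states as extra keys/values, weighted per block by
+ # `pos_weights` / `neg_weights` (FABRIC-style feedback injection).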
+ if cached_pos_hiddens is None and cached_neg_hiddens is None:
381
+ return self.unet(z_all, t, encoder_hidden_states=prompt_embd)
382
+
383
+ local_pos_weights = torch.linspace(*pos_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist()
384
+ local_neg_weights = torch.linspace(*neg_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist()
385
+ for block, pos_weight, neg_weight in zip(
386
+ self.unet.down_blocks + [self.unet.mid_block] + self.unet.up_blocks,
387
+ local_pos_weights + [pos_weights[1]] + local_pos_weights[::-1],
388
+ local_neg_weights + [neg_weights[1]] + local_neg_weights[::-1],
389
+ ):
390
+ for module in block.modules():
391
+ if isinstance(module, BasicTransformerBlock):
392
+
393
+ def new_forward(
394
+ self,
395
+ hidden_states,
396
+ pos_weight=pos_weight,
397
+ neg_weight=neg_weight,
398
+ **kwargs,
399
+ ):
400
+ cond_hiddens, uncond_hiddens = hidden_states.chunk(2, dim=0)
401
+ batch_size, d_model = cond_hiddens.shape[:2]
402
+ device, dtype = hidden_states.device, hidden_states.dtype
403
+
404
+ weights = torch.ones(batch_size, d_model, device=device, dtype=dtype)
405
+ out_pos = self.old_forward(hidden_states)
406
+ out_neg = self.old_forward(hidden_states)
407
+
408
+ if cached_pos_hiddens is not None:
409
+ cached_pos_hs = cached_pos_hiddens.pop(0).to(hidden_states.device)
410
+ cond_pos_hs = torch.cat([cond_hiddens, cached_pos_hs], dim=1)
411
+ pos_weights = weights.clone().repeat(1, 1 + cached_pos_hs.shape[1] // d_model)
412
+ pos_weights[:, d_model:] = pos_weight
413
+ attn_with_weights = FabricCrossAttnProcessor()
414
+ out_pos = attn_with_weights(
415
+ self,
416
+ cond_hiddens,
417
+ encoder_hidden_states=cond_pos_hs,
418
+ weights=pos_weights,
419
+ )
420
+ else:
421
+ out_pos = self.old_forward(cond_hiddens)
422
+
423
+ if cached_neg_hiddens is not None:
424
+ cached_neg_hs = cached_neg_hiddens.pop(0).to(hidden_states.device)
425
+ uncond_neg_hs = torch.cat([uncond_hiddens, cached_neg_hs], dim=1)
426
+ neg_weights = weights.clone().repeat(1, 1 + cached_neg_hs.shape[1] // d_model)
427
+ neg_weights[:, d_model:] = neg_weight
428
+ attn_with_weights = FabricCrossAttnProcessor()
429
+ out_neg = attn_with_weights(
430
+ self,
431
+ uncond_hiddens,
432
+ encoder_hidden_states=uncond_neg_hs,
433
+ weights=neg_weights,
434
+ )
435
+ else:
436
+ out_neg = self.old_forward(uncond_hiddens)
437
+
438
+ out = torch.cat([out_pos, out_neg], dim=0)
439
+ return out
440
+
441
+ module.attn1.old_forward = module.attn1.forward
442
+ module.attn1.forward = new_forward.__get__(module.attn1)
443
+
444
+ out = self.unet(z_all, t, encoder_hidden_states=prompt_embd)
445
+
446
+ # restore original forward pass
447
+ for module in self.unet.modules():
448
+ if isinstance(module, BasicTransformerBlock):
449
+ module.attn1.forward = module.attn1.old_forward
450
+ del module.attn1.old_forward
451
+
452
+ return out
453
+
454
+ def preprocess_feedback_images(self, images, vae, dim, device, dtype, generator) -> torch.tensor:
455
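+ # Descriptive note: encodes the feedback images into VAE latent space (scaled by the
+ # VAE scaling factor) so they can be noised and injected during denoising.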
+ images_t = [self.image_to_tensor(img, dim, dtype) for img in images]
456
+ images_t = torch.stack(images_t).to(device)
457
+ latents = vae.config.scaling_factor * vae.encode(images_t).latent_dist.sample(generator)
458
+
459
+ return torch.cat([latents], dim=0)
460
+
461
+ def check_inputs(
462
+ self,
463
+ prompt,
464
+ negative_prompt=None,
465
+ liked=None,
466
+ disliked=None,
467
+ height=None,
468
+ width=None,
469
+ ):
470
+ if prompt is None:
471
+ raise ValueError("Provide `prompt`. Cannot leave both `prompt` undefined.")
472
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
473
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
474
+
475
+ if negative_prompt is not None and (
476
+ not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list)
477
+ ):
478
+ raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")
479
+
480
+ if liked is not None and not isinstance(liked, list):
481
+ raise ValueError(f"`liked` has to be of type `list` but is {type(liked)}")
482
+
483
+ if disliked is not None and not isinstance(disliked, list):
484
+ raise ValueError(f"`disliked` has to be of type `list` but is {type(disliked)}")
485
+
486
+ if height is not None and not isinstance(height, int):
487
+ raise ValueError(f"`height` has to be of type `int` but is {type(height)}")
488
+
489
+ if width is not None and not isinstance(width, int):
490
+ raise ValueError(f"`width` has to be of type `int` but is {type(width)}")
491
+
492
+ @torch.no_grad()
493
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
494
+ def __call__(
495
+ self,
496
+ prompt: Optional[Union[str, List[str]]] = "",
497
+ negative_prompt: Optional[Union[str, List[str]]] = "lowres, bad anatomy, bad hands, cropped, worst quality",
498
+ liked: Optional[Union[List[str], List[Image.Image]]] = [],
499
+ disliked: Optional[Union[List[str], List[Image.Image]]] = [],
500
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
501
+ height: int = 512,
502
+ width: int = 512,
503
+ return_dict: bool = True,
504
+ num_images: int = 4,
505
+ guidance_scale: float = 7.0,
506
+ num_inference_steps: int = 20,
507
+ output_type: Optional[str] = "pil",
508
+ feedback_start_ratio: float = 0.33,
509
+ feedback_end_ratio: float = 0.66,
510
+ min_weight: float = 0.05,
511
+ max_weight: float = 0.8,
512
+ neg_scale: float = 0.5,
513
+ pos_bottleneck_scale: float = 1.0,
514
+ neg_bottleneck_scale: float = 1.0,
515
+ latents: Optional[torch.FloatTensor] = None,
516
+ ):
517
+ r"""
518
+ The call function to the pipeline for generation. Generate a trajectory of images with binary feedback. The
519
+ feedback can be given as a list of liked and disliked images.
520
+
521
+ Args:
522
+ prompt (`str` or `List[str]`, *optional*):
523
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`
524
+ instead.
525
+ negative_prompt (`str` or `List[str]`, *optional*):
526
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
527
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
528
+ liked (`List[Image.Image]` or `List[str]`, *optional*):
529
+ Encourages images with liked features.
530
+ disliked (`List[Image.Image]` or `List[str]`, *optional*):
531
+ Discourages images with disliked features.
532
+ generator (`torch.Generator` or `List[torch.Generator]` or `int`, *optional*):
533
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) or an `int` to
534
+ make generation deterministic.
535
+ height (`int`, *optional*, defaults to 512):
536
+ Height of the generated image.
537
+ width (`int`, *optional*, defaults to 512):
538
+ Width of the generated image.
539
+ num_images (`int`, *optional*, defaults to 4):
540
+ The number of images to generate per prompt.
541
+ guidance_scale (`float`, *optional*, defaults to 7.0):
542
+ A higher guidance scale value encourages the model to generate images closely linked to the text
543
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
544
+ num_inference_steps (`int`, *optional*, defaults to 20):
545
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
546
+ expense of slower inference.
547
+ output_type (`str`, *optional*, defaults to `"pil"`):
548
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
549
+ return_dict (`bool`, *optional*, defaults to `True`):
550
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
551
+ plain tuple.
552
+ feedback_start_ratio (`float`, *optional*, defaults to `.33`):
553
+ Start point for providing feedback (between 0 and 1).
554
+ feedback_end_ratio (`float`, *optional*, defaults to `.66`):
555
+ End point for providing feedback (between 0 and 1).
556
+ min_weight (`float`, *optional*, defaults to `.05`):
557
+ Minimum weight for feedback.
558
+ max_weight (`float`, *optional*, defaults to `0.8`):
559
+ Maximum weight for feedback.
560
+ neg_scale (`float`, *optional*, defaults to `.5`):
561
+ Scale factor for negative feedback.
562
+
563
+ Examples:
564
+
565
+ Returns:
566
+ [`~pipelines.fabric.FabricPipelineOutput`] or `tuple`:
567
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
568
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
569
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
570
+ "not-safe-for-work" (nsfw) content.
571
+
572
+ """
573
+
574
+ self.check_inputs(prompt, negative_prompt, liked, disliked)
575
+
576
+ device = self._execution_device
577
+ dtype = self.unet.dtype
578
+
579
+ if isinstance(prompt, str) and prompt is not None:
580
+ batch_size = 1
581
+ elif isinstance(prompt, list) and prompt is not None:
582
+ batch_size = len(prompt)
583
+ else:
584
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
585
+
586
+ if isinstance(negative_prompt, str):
587
+ negative_prompt = negative_prompt
588
+ elif isinstance(negative_prompt, list):
589
+ negative_prompt = negative_prompt
590
+ else:
591
+ assert len(negative_prompt) == batch_size
592
+
593
+ shape = (
594
+ batch_size * num_images,
595
+ self.unet.config.in_channels,
596
+ height // self.vae_scale_factor,
597
+ width // self.vae_scale_factor,
598
+ )
599
+ latent_noise = randn_tensor(
600
+ shape,
601
+ device=device,
602
+ dtype=dtype,
603
+ generator=generator,
604
+ )
605
+
606
+ positive_latents = (
607
+ self.preprocess_feedback_images(liked, self.vae, (height, width), device, dtype, generator)
608
+ if liked and len(liked) > 0
609
+ else torch.tensor(
610
+ [],
611
+ device=device,
612
+ dtype=dtype,
613
+ )
614
+ )
615
+ negative_latents = (
616
+ self.preprocess_feedback_images(disliked, self.vae, (height, width), device, dtype, generator)
617
+ if disliked and len(disliked) > 0
618
+ else torch.tensor(
619
+ [],
620
+ device=device,
621
+ dtype=dtype,
622
+ )
623
+ )
624
+
625
+ do_classifier_free_guidance = guidance_scale > 0.1
626
+
627
+ (prompt_neg_embs, prompt_pos_embs) = self._encode_prompt(
628
+ prompt,
629
+ device,
630
+ num_images,
631
+ do_classifier_free_guidance,
632
+ negative_prompt,
633
+ ).split([num_images * batch_size, num_images * batch_size])
634
+
635
+ batched_prompt_embd = torch.cat([prompt_pos_embs, prompt_neg_embs], dim=0)
636
+
637
+ null_tokens = self.tokenizer(
638
+ [""],
639
+ return_tensors="pt",
640
+ max_length=self.tokenizer.model_max_length,
641
+ padding="max_length",
642
+ truncation=True,
643
+ )
644
+
645
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
646
+ attention_mask = null_tokens.attention_mask.to(device)
647
+ else:
648
+ attention_mask = None
649
+
650
+ null_prompt_emb = self.text_encoder(
651
+ input_ids=null_tokens.input_ids.to(device),
652
+ attention_mask=attention_mask,
653
+ ).last_hidden_state
654
+
655
+ null_prompt_emb = null_prompt_emb.to(device=device, dtype=dtype)
656
+
657
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
658
+ timesteps = self.scheduler.timesteps
659
+ latent_noise = latent_noise * self.scheduler.init_noise_sigma
660
+
661
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
662
+
663
+ ref_start_idx = round(len(timesteps) * feedback_start_ratio)
664
+ ref_end_idx = round(len(timesteps) * feedback_end_ratio)
665
+
666
+ with self.progress_bar(total=num_inference_steps) as pbar:
667
+ for i, t in enumerate(timesteps):
668
+ sigma = self.scheduler.sigma_t[t] if hasattr(self.scheduler, "sigma_t") else 0
669
+ if hasattr(self.scheduler, "sigmas"):
670
+ sigma = self.scheduler.sigmas[i]
671
+
672
+ alpha_hat = 1 / (sigma**2 + 1)
673
+
674
+ z_single = self.scheduler.scale_model_input(latent_noise, t)
675
+ z_all = torch.cat([z_single] * 2, dim=0)
676
+ z_ref = torch.cat([positive_latents, negative_latents], dim=0)
677
+
678
+ if i >= ref_start_idx and i <= ref_end_idx:
679
+ weight_factor = max_weight
680
+ else:
681
+ weight_factor = min_weight
682
+
683
+ pos_ws = (weight_factor, weight_factor * pos_bottleneck_scale)
684
+ neg_ws = (weight_factor * neg_scale, weight_factor * neg_scale * neg_bottleneck_scale)
685
+
686
+ if z_ref.size(0) > 0 and weight_factor > 0:
687
+ noise = torch.randn_like(z_ref)
688
+ if isinstance(self.scheduler, EulerAncestralDiscreteScheduler):
689
+ z_ref_noised = (alpha_hat**0.5 * z_ref + (1 - alpha_hat) ** 0.5 * noise).type(dtype)
690
+ else:
691
+ z_ref_noised = self.scheduler.add_noise(z_ref, noise, t)
692
+
693
+ ref_prompt_embd = torch.cat(
694
+ [null_prompt_emb] * (len(positive_latents) + len(negative_latents)), dim=0
695
+ )
696
+ cached_hidden_states = self.get_unet_hidden_states(z_ref_noised, t, ref_prompt_embd)
697
+
698
+ n_pos, n_neg = positive_latents.shape[0], negative_latents.shape[0]
699
+ cached_pos_hs, cached_neg_hs = [], []
700
+ for hs in cached_hidden_states:
701
+ cached_pos, cached_neg = hs.split([n_pos, n_neg], dim=0)
702
+ cached_pos = cached_pos.view(1, -1, *cached_pos.shape[2:]).expand(num_images, -1, -1)
703
+ cached_neg = cached_neg.view(1, -1, *cached_neg.shape[2:]).expand(num_images, -1, -1)
704
+ cached_pos_hs.append(cached_pos)
705
+ cached_neg_hs.append(cached_neg)
706
+
707
+ if n_pos == 0:
708
+ cached_pos_hs = None
709
+ if n_neg == 0:
710
+ cached_neg_hs = None
711
+ else:
712
+ cached_pos_hs, cached_neg_hs = None, None
713
+ unet_out = self.unet_forward_with_cached_hidden_states(
714
+ z_all,
715
+ t,
716
+ prompt_embd=batched_prompt_embd,
717
+ cached_pos_hiddens=cached_pos_hs,
718
+ cached_neg_hiddens=cached_neg_hs,
719
+ pos_weights=pos_ws,
720
+ neg_weights=neg_ws,
721
+ )[0]
722
+
723
+ noise_cond, noise_uncond = unet_out.chunk(2)
724
+ guidance = noise_cond - noise_uncond
725
+ noise_pred = noise_uncond + guidance_scale * guidance
726
+ latent_noise = self.scheduler.step(noise_pred, t, latent_noise)[0]
727
+
728
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
729
+ pbar.update()
730
+
731
+ y = self.vae.decode(latent_noise / self.vae.config.scaling_factor, return_dict=False)[0]
732
+ imgs = self.image_processor.postprocess(
733
+ y,
734
+ output_type=output_type,
735
+ )
736
+
737
+ if not return_dict:
738
+ return imgs
739
+
740
+ return StableDiffusionPipelineOutput(imgs, False)
741
+
742
+ def image_to_tensor(self, image: Union[str, Image.Image], dim: tuple, dtype):
743
+ """
744
+ Convert a PIL image (or a path to an image) to a torch tensor for further processing.
745
+ """
746
+ if isinstance(image, str):
747
+ image = Image.open(image)
748
+ if not image.mode == "RGB":
749
+ image = image.convert("RGB")
750
+ image = self.image_processor.preprocess(image, height=dim[0], width=dim[1])[0]
751
+ return image.type(dtype)
v0.24.0/pipeline_prompt2prompt.py ADDED
@@ -0,0 +1,861 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import abc
18
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+ import torch
22
+ import torch.nn.functional as F
23
+
24
+ from diffusers.models.attention_processor import Attention
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline, StableDiffusionPipelineOutput
26
+
27
+
28
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
29
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
30
+ """
31
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
32
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
33
+ """
34
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
35
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
36
+ # rescale the results from guidance (fixes overexposure)
37
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
38
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
39
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
40
+ return noise_cfg
41
+
42
+
43
+ class Prompt2PromptPipeline(StableDiffusionPipeline):
44
+ r"""
45
+ Prompt-to-Prompt pipeline for text-to-image generation using Stable Diffusion. This model inherits from
+ [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for
+ all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ Args:
49
+ vae ([`AutoencoderKL`]):
50
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
51
+ text_encoder ([`CLIPTextModel`]):
52
+ Frozen text-encoder. Stable Diffusion uses the text portion of
53
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
54
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
55
+ tokenizer (`CLIPTokenizer`):
56
+ Tokenizer of class
57
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
58
+ unet ([`UNet2DConditionModel`]):
+ Conditional U-Net architecture to denoise the encoded image latents.
+ scheduler ([`SchedulerMixin`]):
60
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
61
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
62
+ safety_checker ([`StableDiffusionSafetyChecker`]):
63
+ Classification module that estimates whether generated images could be considered offensive or harmful.
64
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
65
+ feature_extractor ([`CLIPFeatureExtractor`]):
66
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
67
+ """
68
+
69
+ _optional_components = ["safety_checker", "feature_extractor"]
70
+
71
+ @torch.no_grad()
72
+ def __call__(
73
+ self,
74
+ prompt: Union[str, List[str]],
75
+ height: Optional[int] = None,
76
+ width: Optional[int] = None,
77
+ num_inference_steps: int = 50,
78
+ guidance_scale: float = 7.5,
79
+ negative_prompt: Optional[Union[str, List[str]]] = None,
80
+ num_images_per_prompt: Optional[int] = 1,
81
+ eta: float = 0.0,
82
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
83
+ latents: Optional[torch.FloatTensor] = None,
84
+ prompt_embeds: Optional[torch.FloatTensor] = None,
85
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
86
+ output_type: Optional[str] = "pil",
87
+ return_dict: bool = True,
88
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
89
+ callback_steps: Optional[int] = 1,
90
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
91
+ guidance_rescale: float = 0.0,
92
+ ):
93
+ r"""
94
+ Function invoked when calling the pipeline for generation.
95
+
96
+ Args:
97
+ prompt (`str` or `List[str]`):
98
+ The prompt or prompts to guide the image generation.
99
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
100
+ The height in pixels of the generated image.
101
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
102
+ The width in pixels of the generated image.
103
+ num_inference_steps (`int`, *optional*, defaults to 50):
104
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
105
+ expense of slower inference.
106
+ guidance_scale (`float`, *optional*, defaults to 7.5):
107
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
108
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
109
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
110
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
111
+ usually at the expense of lower image quality.
112
+ negative_prompt (`str` or `List[str]`, *optional*):
113
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
114
+ if `guidance_scale` is less than `1`).
115
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
116
+ The number of images to generate per prompt.
117
+ eta (`float`, *optional*, defaults to 0.0):
118
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
119
+ [`schedulers.DDIMScheduler`], will be ignored for others.
120
+ generator (`torch.Generator`, *optional*):
121
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
122
+ to make generation deterministic.
123
+ latents (`torch.FloatTensor`, *optional*):
124
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
125
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
126
+ tensor will be generated by sampling using the supplied random `generator`.
127
+ output_type (`str`, *optional*, defaults to `"pil"`):
128
+ The output format of the generated image. Choose between
129
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
130
+ return_dict (`bool`, *optional*, defaults to `True`):
131
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
132
+ plain tuple.
133
+ callback (`Callable`, *optional*):
134
+ A function that will be called every `callback_steps` steps during inference. The function will be
135
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
136
+ callback_steps (`int`, *optional*, defaults to 1):
137
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
138
+ called at every step.
139
+ cross_attention_kwargs (`dict`, *optional*):
140
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
141
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
142
+
143
+ The keyword arguments to configure the edit are:
144
+ - edit_type (`str`). The edit type to apply. Can be either of `replace`, `refine`, `reweight`.
145
+ - n_cross_replace (`int`): Number of diffusion steps in which cross attention should be replaced
146
+ - n_self_replace (`int`): Number of diffusion steps in which self attention should be replaced
147
+ - local_blend_words (`List[str]`, *optional*, defaults to `None`): Determines which area should be
+ changed. If None, then the whole image can be changed.
+ - equalizer_words (`List[str]`, *optional*, defaults to `None`): Required for edit type `reweight`.
+ Determines which words should be enhanced.
+ - equalizer_strengths (`List[float]`, *optional*, defaults to `None`): Required for edit type `reweight`.
+ Determines how much the words in `equalizer_words` should be enhanced.
153
+
154
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
155
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
156
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
157
+ using zero terminal SNR.
158
+
159
+ Returns:
160
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
161
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
162
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
163
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
164
+ (nsfw) content, according to the `safety_checker`.
165
+ """
166
+
167
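+ # Illustrative example (names taken from the documented kwargs above): a word-replacement
+ # edit between two equal-length prompts could be requested with something like
+ # cross_attention_kwargs={"edit_type": "replace", "n_cross_replace": 0.4, "n_self_replace": 0.4}.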
+ self.controller = create_controller(
168
+ prompt, cross_attention_kwargs, num_inference_steps, tokenizer=self.tokenizer, device=self.device
169
+ )
170
+ self.register_attention_control(self.controller) # add attention controller
171
+
172
+ # 0. Default height and width to unet
173
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
174
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
175
+
176
+ # 1. Check inputs. Raise error if not correct
177
+ self.check_inputs(prompt, height, width, callback_steps)
178
+
179
+ # 2. Define call parameters
180
+ if prompt is not None and isinstance(prompt, str):
181
+ batch_size = 1
182
+ elif prompt is not None and isinstance(prompt, list):
183
+ batch_size = len(prompt)
184
+ else:
185
+ batch_size = prompt_embeds.shape[0]
186
+
187
+ device = self._execution_device
188
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
189
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
190
+ # corresponds to doing no classifier free guidance.
191
+ do_classifier_free_guidance = guidance_scale > 1.0
192
+
193
+ # 3. Encode input prompt
194
+ text_encoder_lora_scale = (
195
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
196
+ )
197
+ prompt_embeds = self._encode_prompt(
198
+ prompt,
199
+ device,
200
+ num_images_per_prompt,
201
+ do_classifier_free_guidance,
202
+ negative_prompt,
203
+ prompt_embeds=prompt_embeds,
204
+ negative_prompt_embeds=negative_prompt_embeds,
205
+ lora_scale=text_encoder_lora_scale,
206
+ )
207
+
208
+ # 4. Prepare timesteps
209
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
210
+ timesteps = self.scheduler.timesteps
211
+
212
+ # 5. Prepare latent variables
213
+ num_channels_latents = self.unet.config.in_channels
214
+ latents = self.prepare_latents(
215
+ batch_size * num_images_per_prompt,
216
+ num_channels_latents,
217
+ height,
218
+ width,
219
+ prompt_embeds.dtype,
220
+ device,
221
+ generator,
222
+ latents,
223
+ )
224
+
225
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
226
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
227
+
228
+ # 7. Denoising loop
229
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
230
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
231
+ for i, t in enumerate(timesteps):
232
+ # expand the latents if we are doing classifier free guidance
233
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
234
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
235
+
236
+ # predict the noise residual
237
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
238
+
239
+ # perform guidance
240
+ if do_classifier_free_guidance:
241
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
242
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
243
+
244
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
245
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
246
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
247
+
248
+ # compute the previous noisy sample x_t -> x_t-1
249
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
250
+
251
+ # step callback
252
+ latents = self.controller.step_callback(latents)
253
+
254
+ # call the callback, if provided
255
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
256
+ progress_bar.update()
257
+ if callback is not None and i % callback_steps == 0:
258
+ step_idx = i // getattr(self.scheduler, "order", 1)
259
+ callback(step_idx, t, latents)
260
+
261
+ # 8. Post-processing
262
+ if not output_type == "latent":
263
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
264
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
265
+ else:
266
+ image = latents
267
+ has_nsfw_concept = None
268
+
269
+ # 9. Run safety checker
270
+ if has_nsfw_concept is None:
271
+ do_denormalize = [True] * image.shape[0]
272
+ else:
273
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
274
+
275
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
276
+
277
+ # Offload last model to CPU
278
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
279
+ self.final_offload_hook.offload()
280
+
281
+ if not return_dict:
282
+ return (image, has_nsfw_concept)
283
+
284
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
285
+
286
+ def register_attention_control(self, controller):
287
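+ # Descriptive note: swaps every attention processor in the UNet for a P2PCrossAttnProcessor
+ # that reports its attention maps to the controller, and tells the controller how many
+ # attention layers were registered.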
+ attn_procs = {}
288
+ cross_att_count = 0
289
+ for name in self.unet.attn_processors.keys():
290
+ None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim
291
+ if name.startswith("mid_block"):
292
+ self.unet.config.block_out_channels[-1]
293
+ place_in_unet = "mid"
294
+ elif name.startswith("up_blocks"):
295
+ block_id = int(name[len("up_blocks.")])
296
+ list(reversed(self.unet.config.block_out_channels))[block_id]
297
+ place_in_unet = "up"
298
+ elif name.startswith("down_blocks"):
299
+ block_id = int(name[len("down_blocks.")])
300
+ self.unet.config.block_out_channels[block_id]
301
+ place_in_unet = "down"
302
+ else:
303
+ continue
304
+ cross_att_count += 1
305
+ attn_procs[name] = P2PCrossAttnProcessor(controller=controller, place_in_unet=place_in_unet)
306
+
307
+ self.unet.set_attn_processor(attn_procs)
308
+ controller.num_att_layers = cross_att_count
309
+
310
+
311
+ class P2PCrossAttnProcessor:
312
+ def __init__(self, controller, place_in_unet):
313
+ super().__init__()
314
+ self.controller = controller
315
+ self.place_in_unet = place_in_unet
316
+
317
+ def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
318
+ batch_size, sequence_length, _ = hidden_states.shape
319
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
320
+
321
+ query = attn.to_q(hidden_states)
322
+
323
+ is_cross = encoder_hidden_states is not None
324
+ encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
325
+ key = attn.to_k(encoder_hidden_states)
326
+ value = attn.to_v(encoder_hidden_states)
327
+
328
+ query = attn.head_to_batch_dim(query)
329
+ key = attn.head_to_batch_dim(key)
330
+ value = attn.head_to_batch_dim(value)
331
+
332
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
333
+
334
+ # one line change
335
+ self.controller(attention_probs, is_cross, self.place_in_unet)
336
+
337
+ hidden_states = torch.bmm(attention_probs, value)
338
+ hidden_states = attn.batch_to_head_dim(hidden_states)
339
+
340
+ # linear proj
341
+ hidden_states = attn.to_out[0](hidden_states)
342
+ # dropout
343
+ hidden_states = attn.to_out[1](hidden_states)
344
+
345
+ return hidden_states
346
+
347
+
348
+ def create_controller(
349
+ prompts: List[str], cross_attention_kwargs: Dict, num_inference_steps: int, tokenizer, device
350
+ ) -> AttentionControl:
351
+ edit_type = cross_attention_kwargs.get("edit_type", None)
352
+ local_blend_words = cross_attention_kwargs.get("local_blend_words", None)
353
+ equalizer_words = cross_attention_kwargs.get("equalizer_words", None)
354
+ equalizer_strengths = cross_attention_kwargs.get("equalizer_strengths", None)
355
+ n_cross_replace = cross_attention_kwargs.get("n_cross_replace", 0.4)
356
+ n_self_replace = cross_attention_kwargs.get("n_self_replace", 0.4)
357
+
358
+ # only replace
359
+ if edit_type == "replace" and local_blend_words is None:
360
+ return AttentionReplace(
361
+ prompts, num_inference_steps, n_cross_replace, n_self_replace, tokenizer=tokenizer, device=device
362
+ )
363
+
364
+ # replace + localblend
365
+ if edit_type == "replace" and local_blend_words is not None:
366
+ lb = LocalBlend(prompts, local_blend_words, tokenizer=tokenizer, device=device)
367
+ return AttentionReplace(
368
+ prompts, num_inference_steps, n_cross_replace, n_self_replace, lb, tokenizer=tokenizer, device=device
369
+ )
370
+
371
+ # only refine
372
+ if edit_type == "refine" and local_blend_words is None:
373
+ return AttentionRefine(
374
+ prompts, num_inference_steps, n_cross_replace, n_self_replace, tokenizer=tokenizer, device=device
375
+ )
376
+
377
+ # refine + localblend
378
+ if edit_type == "refine" and local_blend_words is not None:
379
+ lb = LocalBlend(prompts, local_blend_words, tokenizer=tokenizer, device=device)
380
+ return AttentionRefine(
381
+ prompts, num_inference_steps, n_cross_replace, n_self_replace, lb, tokenizer=tokenizer, device=device
382
+ )
383
+
384
+ # reweight
385
+ if edit_type == "reweight":
386
+ assert (
387
+ equalizer_words is not None and equalizer_strengths is not None
388
+ ), "To use reweight edit, please specify equalizer_words and equalizer_strengths."
389
+ assert len(equalizer_words) == len(
390
+ equalizer_strengths
391
+ ), "equalizer_words and equalizer_strengths must be of same length."
392
+ equalizer = get_equalizer(prompts[1], equalizer_words, equalizer_strengths, tokenizer=tokenizer)
393
+ return AttentionReweight(
394
+ prompts,
395
+ num_inference_steps,
396
+ n_cross_replace,
397
+ n_self_replace,
398
+ tokenizer=tokenizer,
399
+ device=device,
400
+ equalizer=equalizer,
401
+ )
402
+
403
+ raise ValueError(f"Edit type {edit_type} not recognized. Use one of: replace, refine, reweight.")
404
+
405
+
406
+ class AttentionControl(abc.ABC):
407
+ def step_callback(self, x_t):
408
+ return x_t
409
+
410
+ def between_steps(self):
411
+ return
412
+
413
+ @property
414
+ def num_uncond_att_layers(self):
415
+ return 0
416
+
417
+ @abc.abstractmethod
418
+ def forward(self, attn, is_cross: bool, place_in_unet: str):
419
+ raise NotImplementedError
420
+
421
+ def __call__(self, attn, is_cross: bool, place_in_unet: str):
422
+ if self.cur_att_layer >= self.num_uncond_att_layers:
423
+ h = attn.shape[0]
424
+ attn[h // 2 :] = self.forward(attn[h // 2 :], is_cross, place_in_unet)
425
+ self.cur_att_layer += 1
426
+ if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
427
+ self.cur_att_layer = 0
428
+ self.cur_step += 1
429
+ self.between_steps()
430
+ return attn
431
+
432
+ def reset(self):
433
+ self.cur_step = 0
434
+ self.cur_att_layer = 0
435
+
436
+ def __init__(self):
437
+ self.cur_step = 0
438
+ self.num_att_layers = -1
439
+ self.cur_att_layer = 0
440
+
441
+
442
+ class EmptyControl(AttentionControl):
443
+ def forward(self, attn, is_cross: bool, place_in_unet: str):
444
+ return attn
445
+
446
+
447
+ class AttentionStore(AttentionControl):
448
+ @staticmethod
449
+ def get_empty_store():
450
+ return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []}
451
+
452
+ def forward(self, attn, is_cross: bool, place_in_unet: str):
453
+ key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
454
+ if attn.shape[1] <= 32**2: # avoid memory overhead
455
+ self.step_store[key].append(attn)
456
+ return attn
457
+
458
+ def between_steps(self):
459
+ if len(self.attention_store) == 0:
460
+ self.attention_store = self.step_store
461
+ else:
462
+ for key in self.attention_store:
463
+ for i in range(len(self.attention_store[key])):
464
+ self.attention_store[key][i] += self.step_store[key][i]
465
+ self.step_store = self.get_empty_store()
466
+
467
+ def get_average_attention(self):
468
+ average_attention = {
469
+ key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store
470
+ }
471
+ return average_attention
472
+
473
+ def reset(self):
474
+ super(AttentionStore, self).reset()
475
+ self.step_store = self.get_empty_store()
476
+ self.attention_store = {}
477
+
478
+ def __init__(self):
479
+ super(AttentionStore, self).__init__()
480
+ self.step_store = self.get_empty_store()
481
+ self.attention_store = {}
482
+
483
+
484
+ class LocalBlend:
485
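+ # Descriptive note: builds a spatial mask from the cross-attention maps of the selected
+ # words and blends the edited latents with the source latents only inside that mask.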
+ def __call__(self, x_t, attention_store):
486
+ k = 1
487
+ maps = attention_store["down_cross"][2:4] + attention_store["up_cross"][:3]
488
+ maps = [item.reshape(self.alpha_layers.shape[0], -1, 1, 16, 16, self.max_num_words) for item in maps]
489
+ maps = torch.cat(maps, dim=1)
490
+ maps = (maps * self.alpha_layers).sum(-1).mean(1)
491
+ mask = F.max_pool2d(maps, (k * 2 + 1, k * 2 + 1), (1, 1), padding=(k, k))
492
+ mask = F.interpolate(mask, size=(x_t.shape[2:]))
493
+ mask = mask / mask.max(2, keepdims=True)[0].max(3, keepdims=True)[0]
494
+ mask = mask.gt(self.threshold)
495
+ mask = (mask[:1] + mask[1:]).float()
496
+ x_t = x_t[:1] + mask * (x_t - x_t[:1])
497
+ return x_t
498
+
499
+ def __init__(
500
+ self, prompts: List[str], words: List[List[str]], tokenizer, device, threshold=0.3, max_num_words=77
501
+ ):
502
+ self.max_num_words = 77
503
+
504
+ alpha_layers = torch.zeros(len(prompts), 1, 1, 1, 1, self.max_num_words)
505
+ for i, (prompt, words_) in enumerate(zip(prompts, words)):
506
+ if isinstance(words_, str):
507
+ words_ = [words_]
508
+ for word in words_:
509
+ ind = get_word_inds(prompt, word, tokenizer)
510
+ alpha_layers[i, :, :, :, :, ind] = 1
511
+ self.alpha_layers = alpha_layers.to(device)
512
+ self.threshold = threshold
513
+
514
+
515
+ class AttentionControlEdit(AttentionStore, abc.ABC):
516
+ def step_callback(self, x_t):
517
+ if self.local_blend is not None:
518
+ x_t = self.local_blend(x_t, self.attention_store)
519
+ return x_t
520
+
521
+ def replace_self_attention(self, attn_base, att_replace):
522
+ if att_replace.shape[2] <= 16**2:
523
+ return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape)
524
+ else:
525
+ return att_replace
526
+
527
+ @abc.abstractmethod
528
+ def replace_cross_attention(self, attn_base, att_replace):
529
+ raise NotImplementedError
530
+
531
+ def forward(self, attn, is_cross: bool, place_in_unet: str):
532
+ super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet)
533
+ # FIXME not replace correctly
534
+ if is_cross or (self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]):
535
+ h = attn.shape[0] // (self.batch_size)
536
+ attn = attn.reshape(self.batch_size, h, *attn.shape[1:])
537
+ attn_base, attn_repalce = attn[0], attn[1:]
538
+ if is_cross:
539
+ alpha_words = self.cross_replace_alpha[self.cur_step]
540
+ attn_repalce_new = (
541
+ self.replace_cross_attention(attn_base, attn_repalce) * alpha_words
542
+ + (1 - alpha_words) * attn_repalce
543
+ )
544
+ attn[1:] = attn_repalce_new
545
+ else:
546
+ attn[1:] = self.replace_self_attention(attn_base, attn_repalce)
547
+ attn = attn.reshape(self.batch_size * h, *attn.shape[2:])
548
+ return attn
549
+
550
+ def __init__(
551
+ self,
552
+ prompts,
553
+ num_steps: int,
554
+ cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],
555
+ self_replace_steps: Union[float, Tuple[float, float]],
556
+ local_blend: Optional[LocalBlend],
557
+ tokenizer,
558
+ device,
559
+ ):
560
+ super(AttentionControlEdit, self).__init__()
561
+ # add tokenizer and device here
562
+
563
+ self.tokenizer = tokenizer
564
+ self.device = device
565
+
566
+ self.batch_size = len(prompts)
567
+ self.cross_replace_alpha = get_time_words_attention_alpha(
568
+ prompts, num_steps, cross_replace_steps, self.tokenizer
569
+ ).to(self.device)
570
+ if isinstance(self_replace_steps, float):
571
+ self_replace_steps = 0, self_replace_steps
572
+ self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1])
573
+ self.local_blend = local_blend  # defined externally and passed in
574
+
575
+
576
+ class AttentionReplace(AttentionControlEdit):
577
+ def replace_cross_attention(self, attn_base, att_replace):
578
+ return torch.einsum("hpw,bwn->bhpn", attn_base, self.mapper)
579
+
580
+ def __init__(
581
+ self,
582
+ prompts,
583
+ num_steps: int,
584
+ cross_replace_steps: float,
585
+ self_replace_steps: float,
586
+ local_blend: Optional[LocalBlend] = None,
587
+ tokenizer=None,
588
+ device=None,
589
+ ):
590
+ super(AttentionReplace, self).__init__(
591
+ prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, tokenizer, device
592
+ )
593
+ self.mapper = get_replacement_mapper(prompts, self.tokenizer).to(self.device)
594
+
595
+
596
+ class AttentionRefine(AttentionControlEdit):
597
+ def replace_cross_attention(self, attn_base, att_replace):
598
+ attn_base_replace = attn_base[:, :, self.mapper].permute(2, 0, 1, 3)
599
+ attn_replace = attn_base_replace * self.alphas + att_replace * (1 - self.alphas)
600
+ return attn_replace
601
+
602
+ def __init__(
603
+ self,
604
+ prompts,
605
+ num_steps: int,
606
+ cross_replace_steps: float,
607
+ self_replace_steps: float,
608
+ local_blend: Optional[LocalBlend] = None,
609
+ tokenizer=None,
610
+ device=None,
611
+ ):
612
+ super(AttentionRefine, self).__init__(
613
+ prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, tokenizer, device
614
+ )
615
+ self.mapper, alphas = get_refinement_mapper(prompts, self.tokenizer)
616
+ self.mapper, alphas = self.mapper.to(self.device), alphas.to(self.device)
617
+ self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1])
618
+
619
+
620
+ class AttentionReweight(AttentionControlEdit):
621
+ def replace_cross_attention(self, attn_base, att_replace):
622
+ if self.prev_controller is not None:
623
+ attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace)
624
+ attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :]
625
+ return attn_replace
626
+
627
+ def __init__(
628
+ self,
629
+ prompts,
630
+ num_steps: int,
631
+ cross_replace_steps: float,
632
+ self_replace_steps: float,
633
+ equalizer,
634
+ local_blend: Optional[LocalBlend] = None,
635
+ controller: Optional[AttentionControlEdit] = None,
636
+ tokenizer=None,
637
+ device=None,
638
+ ):
639
+ super(AttentionReweight, self).__init__(
640
+ prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, tokenizer, device
641
+ )
642
+ self.equalizer = equalizer.to(self.device)
643
+ self.prev_controller = controller
644
+
645
+
646
+ ### util functions for all Edits
647
+ def update_alpha_time_word(
648
+ alpha, bounds: Union[float, Tuple[float, float]], prompt_ind: int, word_inds: Optional[torch.Tensor] = None
649
+ ):
650
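+ # Descriptive note: sets the per-step replacement weights for one prompt to 1 inside the
+ # [start, end) step window derived from `bounds` and to 0 outside it.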
+ if isinstance(bounds, float):
651
+ bounds = 0, bounds
652
+ start, end = int(bounds[0] * alpha.shape[0]), int(bounds[1] * alpha.shape[0])
653
+ if word_inds is None:
654
+ word_inds = torch.arange(alpha.shape[2])
655
+ alpha[:start, prompt_ind, word_inds] = 0
656
+ alpha[start:end, prompt_ind, word_inds] = 1
657
+ alpha[end:, prompt_ind, word_inds] = 0
658
+ return alpha
659
+
660
+
661
+ def get_time_words_attention_alpha(
662
+ prompts, num_steps, cross_replace_steps: Union[float, Dict[str, Tuple[float, float]]], tokenizer, max_num_words=77
663
+ ):
664
+ if not isinstance(cross_replace_steps, dict):
665
+ cross_replace_steps = {"default_": cross_replace_steps}
666
+ if "default_" not in cross_replace_steps:
667
+ cross_replace_steps["default_"] = (0.0, 1.0)
668
+ alpha_time_words = torch.zeros(num_steps + 1, len(prompts) - 1, max_num_words)
669
+ for i in range(len(prompts) - 1):
670
+ alpha_time_words = update_alpha_time_word(alpha_time_words, cross_replace_steps["default_"], i)
671
+ for key, item in cross_replace_steps.items():
672
+ if key != "default_":
673
+ inds = [get_word_inds(prompts[i], key, tokenizer) for i in range(1, len(prompts))]
674
+ for i, ind in enumerate(inds):
675
+ if len(ind) > 0:
676
+ alpha_time_words = update_alpha_time_word(alpha_time_words, item, i, ind)
677
+ alpha_time_words = alpha_time_words.reshape(num_steps + 1, len(prompts) - 1, 1, 1, max_num_words)
678
+ return alpha_time_words
679
+
680
+
681
+ ### util functions for LocalBlend and ReplacementEdit
682
+ def get_word_inds(text: str, word_place: int, tokenizer):
683
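+ # Descriptive note: maps a word (given as a string or a word position) to the indices of
+ # its tokens in the tokenized prompt.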
+ split_text = text.split(" ")
684
+ if isinstance(word_place, str):
685
+ word_place = [i for i, word in enumerate(split_text) if word_place == word]
686
+ elif isinstance(word_place, int):
687
+ word_place = [word_place]
688
+ out = []
689
+ if len(word_place) > 0:
690
+ words_encode = [tokenizer.decode([item]).strip("#") for item in tokenizer.encode(text)][1:-1]
691
+ cur_len, ptr = 0, 0
692
+
693
+ for i in range(len(words_encode)):
694
+ cur_len += len(words_encode[i])
695
+ if ptr in word_place:
696
+ out.append(i + 1)
697
+ if cur_len >= len(split_text[ptr]):
698
+ ptr += 1
699
+ cur_len = 0
700
+ return np.array(out)
701
+
702
+
703
+ ### util functions for ReplacementEdit
704
+ def get_replacement_mapper_(x: str, y: str, tokenizer, max_len=77):
705
+ words_x = x.split(" ")
706
+ words_y = y.split(" ")
707
+ if len(words_x) != len(words_y):
708
+ raise ValueError(
709
+ f"attention replacement edit can only be applied on prompts with the same length"
710
+ f" but prompt A has {len(words_x)} words and prompt B has {len(words_y)} words."
711
+ )
712
+ inds_replace = [i for i in range(len(words_y)) if words_y[i] != words_x[i]]
713
+ inds_source = [get_word_inds(x, i, tokenizer) for i in inds_replace]
714
+ inds_target = [get_word_inds(y, i, tokenizer) for i in inds_replace]
715
+ mapper = np.zeros((max_len, max_len))
716
+ i = j = 0
717
+ cur_inds = 0
718
+ while i < max_len and j < max_len:
719
+ if cur_inds < len(inds_source) and inds_source[cur_inds][0] == i:
720
+ inds_source_, inds_target_ = inds_source[cur_inds], inds_target[cur_inds]
721
+ if len(inds_source_) == len(inds_target_):
722
+ mapper[inds_source_, inds_target_] = 1
723
+ else:
724
+ ratio = 1 / len(inds_target_)
725
+ for i_t in inds_target_:
726
+ mapper[inds_source_, i_t] = ratio
727
+ cur_inds += 1
728
+ i += len(inds_source_)
729
+ j += len(inds_target_)
730
+ elif cur_inds < len(inds_source):
731
+ mapper[i, j] = 1
732
+ i += 1
733
+ j += 1
734
+ else:
735
+ mapper[j, j] = 1
736
+ i += 1
737
+ j += 1
738
+
739
+ return torch.from_numpy(mapper).float()
740
+
741
+
742
+ def get_replacement_mapper(prompts, tokenizer, max_len=77):
743
+ x_seq = prompts[0]
744
+ mappers = []
745
+ for i in range(1, len(prompts)):
746
+ mapper = get_replacement_mapper_(x_seq, prompts[i], tokenizer, max_len)
747
+ mappers.append(mapper)
748
+ return torch.stack(mappers)
749
+
750
+
751
+ ### util functions for ReweightEdit
752
+ def get_equalizer(
753
+ text: str, word_select: Union[int, Tuple[int, ...]], values: Union[List[float], Tuple[float, ...]], tokenizer
754
+ ):
755
+ if isinstance(word_select, (int, str)):
756
+ word_select = (word_select,)
757
+ equalizer = torch.ones(len(values), 77)
758
+ values = torch.tensor(values, dtype=torch.float32)
759
+ for word in word_select:
760
+ inds = get_word_inds(text, word, tokenizer)
761
+ equalizer[:, inds] = values
762
+ return equalizer
763
+
764
+
765
+ ### util functions for RefinementEdit
766
+ class ScoreParams:
767
+ def __init__(self, gap, match, mismatch):
768
+ self.gap = gap
769
+ self.match = match
770
+ self.mismatch = mismatch
771
+
772
+ def mis_match_char(self, x, y):
773
+ if x != y:
774
+ return self.mismatch
775
+ else:
776
+ return self.match
777
+
778
+
779
+ def get_matrix(size_x, size_y, gap):
780
+ matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32)
781
+ matrix[0, 1:] = (np.arange(size_y) + 1) * gap
782
+ matrix[1:, 0] = (np.arange(size_x) + 1) * gap
783
+ return matrix
784
+
785
+
786
+ def get_traceback_matrix(size_x, size_y):
787
+ matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32)
788
+ matrix[0, 1:] = 1
789
+ matrix[1:, 0] = 2
790
+ matrix[0, 0] = 4
791
+ return matrix
792
+
793
+
794
+ def global_align(x, y, score):
795
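+ # Descriptive note: Needleman-Wunsch-style global alignment of the two token sequences,
+ # used to map tokens of the edited prompt back onto the source prompt.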
+ matrix = get_matrix(len(x), len(y), score.gap)
796
+ trace_back = get_traceback_matrix(len(x), len(y))
797
+ for i in range(1, len(x) + 1):
798
+ for j in range(1, len(y) + 1):
799
+ left = matrix[i, j - 1] + score.gap
800
+ up = matrix[i - 1, j] + score.gap
801
+ diag = matrix[i - 1, j - 1] + score.mis_match_char(x[i - 1], y[j - 1])
802
+ matrix[i, j] = max(left, up, diag)
803
+ if matrix[i, j] == left:
804
+ trace_back[i, j] = 1
805
+ elif matrix[i, j] == up:
806
+ trace_back[i, j] = 2
807
+ else:
808
+ trace_back[i, j] = 3
809
+ return matrix, trace_back
810
+
811
+
812
+ def get_aligned_sequences(x, y, trace_back):
813
+ x_seq = []
814
+ y_seq = []
815
+ i = len(x)
816
+ j = len(y)
817
+ mapper_y_to_x = []
818
+ while i > 0 or j > 0:
819
+ if trace_back[i, j] == 3:
820
+ x_seq.append(x[i - 1])
821
+ y_seq.append(y[j - 1])
822
+ i = i - 1
823
+ j = j - 1
824
+ mapper_y_to_x.append((j, i))
825
+ elif trace_back[i][j] == 1:
826
+ x_seq.append("-")
827
+ y_seq.append(y[j - 1])
828
+ j = j - 1
829
+ mapper_y_to_x.append((j, -1))
830
+ elif trace_back[i][j] == 2:
831
+ x_seq.append(x[i - 1])
832
+ y_seq.append("-")
833
+ i = i - 1
834
+ elif trace_back[i][j] == 4:
835
+ break
836
+ mapper_y_to_x.reverse()
837
+ return x_seq, y_seq, torch.tensor(mapper_y_to_x, dtype=torch.int64)
838
+
839
+
840
+ def get_mapper(x: str, y: str, tokenizer, max_len=77):
841
+ x_seq = tokenizer.encode(x)
842
+ y_seq = tokenizer.encode(y)
843
+ score = ScoreParams(0, 1, -1)
844
+ matrix, trace_back = global_align(x_seq, y_seq, score)
845
+ mapper_base = get_aligned_sequences(x_seq, y_seq, trace_back)[-1]
846
+ alphas = torch.ones(max_len)
847
+ alphas[: mapper_base.shape[0]] = mapper_base[:, 1].ne(-1).float()
848
+ mapper = torch.zeros(max_len, dtype=torch.int64)
849
+ mapper[: mapper_base.shape[0]] = mapper_base[:, 1]
850
+ mapper[mapper_base.shape[0] :] = len(y_seq) + torch.arange(max_len - len(y_seq))
851
+ return mapper, alphas
852
+
853
+
854
+ def get_refinement_mapper(prompts, tokenizer, max_len=77):
855
+ x_seq = prompts[0]
856
+ mappers, alphas = [], []
857
+ for i in range(1, len(prompts)):
858
+ mapper, alpha = get_mapper(x_seq, prompts[i], tokenizer, max_len)
859
+ mappers.append(mapper)
860
+ alphas.append(alpha)
861
+ return torch.stack(mappers), torch.stack(alphas)
v0.24.0/pipeline_stable_diffusion_upscale_ldm3d.py ADDED
@@ -0,0 +1,772 @@
1
+ # Copyright 2023 The Intel Labs Team Authors and the HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import numpy as np
19
+ import PIL
20
+ import torch
21
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
22
+
23
+ from diffusers import DiffusionPipeline
24
+ from diffusers.image_processor import PipelineDepthInput, PipelineImageInput, VaeImageProcessorLDM3D
25
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
26
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
27
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
28
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
29
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d import LDM3DPipelineOutput
30
+ from diffusers.schedulers import DDPMScheduler, KarrasDiffusionSchedulers
31
+ from diffusers.utils import (
32
+ USE_PEFT_BACKEND,
33
+ deprecate,
34
+ logging,
35
+ scale_lora_layers,
36
+ unscale_lora_layers,
37
+ )
38
+ from diffusers.utils.torch_utils import randn_tensor
39
+
40
+
41
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
42
+
43
+ EXAMPLE_DOC_STRING = """
44
+ Examples:
45
+ ```python
46
+ >>> from diffusers import StableDiffusionUpscaleLDM3DPipeline
47
+ >>> from PIL import Image
48
+ >>> from io import BytesIO
49
+ >>> import requests
50
+
51
+ >>> pipe = StableDiffusionUpscaleLDM3DPipeline.from_pretrained("Intel/ldm3d-sr")
52
+ >>> pipe = pipe.to("cuda")
53
+ >>> rgb_path = "https://huggingface.co/Intel/ldm3d-sr/resolve/main/lemons_ldm3d_rgb.jpg"
54
+ >>> depth_path = "https://huggingface.co/Intel/ldm3d-sr/resolve/main/lemons_ldm3d_depth.png"
55
+ >>> low_res_rgb = Image.open(BytesIO(requests.get(rgb_path).content)).convert("RGB")
56
+ >>> low_res_depth = Image.open(BytesIO(requests.get(depth_path).content)).convert("L")
57
+ >>> output = pipe(
58
+ ... prompt="high quality high resolution uhd 4k image",
59
+ ... rgb=low_res_rgb,
60
+ ... depth=low_res_depth,
61
+ ... num_inference_steps=50,
62
+ ... target_res=[1024, 1024],
63
+ ... )
64
+ >>> rgb_image, depth_image = output.rgb, output.depth
65
+ >>> rgb_image[0].save("hr_ldm3d_rgb.jpg")
66
+ >>> depth_image[0].save("hr_ldm3d_depth.png")
67
+ ```
68
+ """
69
+
70
+
71
+ class StableDiffusionUpscaleLDM3DPipeline(
72
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
73
+ ):
74
+ r"""
75
+     Pipeline for upscaling the RGB and depth images generated with LDM3D.
76
+
77
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
78
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
79
+
80
+ The pipeline also inherits the following loading methods:
81
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
82
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
83
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
84
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
85
+
86
+ Args:
87
+ vae ([`AutoencoderKL`]):
88
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
89
+ text_encoder ([`~transformers.CLIPTextModel`]):
90
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
91
+ tokenizer ([`~transformers.CLIPTokenizer`]):
92
+ A `CLIPTokenizer` to tokenize text.
93
+ unet ([`UNet2DConditionModel`]):
94
+ A `UNet2DConditionModel` to denoise the encoded image latents.
95
+ low_res_scheduler ([`SchedulerMixin`]):
96
+ A scheduler used to add initial noise to the low resolution conditioning image. It must be an instance of
97
+ [`DDPMScheduler`].
98
+ scheduler ([`SchedulerMixin`]):
99
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
100
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
101
+ safety_checker ([`StableDiffusionSafetyChecker`]):
102
+ Classification module that estimates whether generated images could be considered offensive or harmful.
103
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
104
+ about a model's potential harms.
105
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
106
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
107
+ """
108
+
109
+ _optional_components = ["safety_checker", "feature_extractor"]
110
+
111
+ def __init__(
112
+ self,
113
+ vae: AutoencoderKL,
114
+ text_encoder: CLIPTextModel,
115
+ tokenizer: CLIPTokenizer,
116
+ unet: UNet2DConditionModel,
117
+ low_res_scheduler: DDPMScheduler,
118
+ scheduler: KarrasDiffusionSchedulers,
119
+ safety_checker: StableDiffusionSafetyChecker,
120
+ feature_extractor: CLIPImageProcessor,
121
+ requires_safety_checker: bool = True,
122
+ watermarker: Optional[Any] = None,
123
+ max_noise_level: int = 350,
124
+ ):
125
+ super().__init__()
126
+
127
+ if safety_checker is None and requires_safety_checker:
128
+ logger.warning(
129
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
130
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
131
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
132
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
133
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
134
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
135
+ )
136
+
137
+ if safety_checker is not None and feature_extractor is None:
138
+ raise ValueError(
139
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
140
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
141
+ )
142
+
143
+ self.register_modules(
144
+ vae=vae,
145
+ text_encoder=text_encoder,
146
+ tokenizer=tokenizer,
147
+ unet=unet,
148
+ low_res_scheduler=low_res_scheduler,
149
+ scheduler=scheduler,
150
+ safety_checker=safety_checker,
151
+ watermarker=watermarker,
152
+ feature_extractor=feature_extractor,
153
+ )
154
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
155
+ self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor, resample="bilinear")
156
+ # self.register_to_config(requires_safety_checker=requires_safety_checker)
157
+ self.register_to_config(max_noise_level=max_noise_level)
158
+
159
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline._encode_prompt
160
+ def _encode_prompt(
161
+ self,
162
+ prompt,
163
+ device,
164
+ num_images_per_prompt,
165
+ do_classifier_free_guidance,
166
+ negative_prompt=None,
167
+ prompt_embeds: Optional[torch.FloatTensor] = None,
168
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
169
+ lora_scale: Optional[float] = None,
170
+ **kwargs,
171
+ ):
172
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
173
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
174
+
175
+ prompt_embeds_tuple = self.encode_prompt(
176
+ prompt=prompt,
177
+ device=device,
178
+ num_images_per_prompt=num_images_per_prompt,
179
+ do_classifier_free_guidance=do_classifier_free_guidance,
180
+ negative_prompt=negative_prompt,
181
+ prompt_embeds=prompt_embeds,
182
+ negative_prompt_embeds=negative_prompt_embeds,
183
+ lora_scale=lora_scale,
184
+ **kwargs,
185
+ )
186
+
187
+         # concatenate for backwards compatibility
188
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
189
+
190
+ return prompt_embeds
191
+
192
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline.encode_prompt
193
+ def encode_prompt(
194
+ self,
195
+ prompt,
196
+ device,
197
+ num_images_per_prompt,
198
+ do_classifier_free_guidance,
199
+ negative_prompt=None,
200
+ prompt_embeds: Optional[torch.FloatTensor] = None,
201
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
202
+ lora_scale: Optional[float] = None,
203
+ clip_skip: Optional[int] = None,
204
+ ):
205
+ r"""
206
+ Encodes the prompt into text encoder hidden states.
207
+
208
+ Args:
209
+ prompt (`str` or `List[str]`, *optional*):
210
+ prompt to be encoded
211
+ device: (`torch.device`):
212
+ torch device
213
+ num_images_per_prompt (`int`):
214
+ number of images that should be generated per prompt
215
+ do_classifier_free_guidance (`bool`):
216
+ whether to use classifier free guidance or not
217
+ negative_prompt (`str` or `List[str]`, *optional*):
218
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
219
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
220
+ less than `1`).
221
+ prompt_embeds (`torch.FloatTensor`, *optional*):
222
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
223
+ provided, text embeddings will be generated from `prompt` input argument.
224
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
225
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
226
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
227
+ argument.
228
+ lora_scale (`float`, *optional*):
229
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
230
+ clip_skip (`int`, *optional*):
231
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
232
+ the output of the pre-final layer will be used for computing the prompt embeddings.
233
+ """
234
+ # set lora scale so that monkey patched LoRA
235
+ # function of text encoder can correctly access it
236
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
237
+ self._lora_scale = lora_scale
238
+
239
+ # dynamically adjust the LoRA scale
240
+ if not USE_PEFT_BACKEND:
241
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
242
+ else:
243
+ scale_lora_layers(self.text_encoder, lora_scale)
244
+
245
+ if prompt is not None and isinstance(prompt, str):
246
+ batch_size = 1
247
+ elif prompt is not None and isinstance(prompt, list):
248
+ batch_size = len(prompt)
249
+ else:
250
+ batch_size = prompt_embeds.shape[0]
251
+
252
+ if prompt_embeds is None:
253
+             # textual inversion: process multi-vector tokens if necessary
254
+ if isinstance(self, TextualInversionLoaderMixin):
255
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
256
+
257
+ text_inputs = self.tokenizer(
258
+ prompt,
259
+ padding="max_length",
260
+ max_length=self.tokenizer.model_max_length,
261
+ truncation=True,
262
+ return_tensors="pt",
263
+ )
264
+ text_input_ids = text_inputs.input_ids
265
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
266
+
267
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
268
+ text_input_ids, untruncated_ids
269
+ ):
270
+ removed_text = self.tokenizer.batch_decode(
271
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
272
+ )
273
+ logger.warning(
274
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
275
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
276
+ )
277
+
278
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
279
+ attention_mask = text_inputs.attention_mask.to(device)
280
+ else:
281
+ attention_mask = None
282
+
283
+ if clip_skip is None:
284
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
285
+ prompt_embeds = prompt_embeds[0]
286
+ else:
287
+ prompt_embeds = self.text_encoder(
288
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
289
+ )
290
+ # Access the `hidden_states` first, that contains a tuple of
291
+ # all the hidden states from the encoder layers. Then index into
292
+ # the tuple to access the hidden states from the desired layer.
293
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
294
+ # We also need to apply the final LayerNorm here to not mess with the
295
+ # representations. The `last_hidden_states` that we typically use for
296
+ # obtaining the final prompt representations passes through the LayerNorm
297
+ # layer.
298
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
299
+
300
+ if self.text_encoder is not None:
301
+ prompt_embeds_dtype = self.text_encoder.dtype
302
+ elif self.unet is not None:
303
+ prompt_embeds_dtype = self.unet.dtype
304
+ else:
305
+ prompt_embeds_dtype = prompt_embeds.dtype
306
+
307
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
308
+
309
+ bs_embed, seq_len, _ = prompt_embeds.shape
310
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
311
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
312
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
313
+
314
+ # get unconditional embeddings for classifier free guidance
315
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
316
+ uncond_tokens: List[str]
317
+ if negative_prompt is None:
318
+ uncond_tokens = [""] * batch_size
319
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
320
+ raise TypeError(
321
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
322
+ f" {type(prompt)}."
323
+ )
324
+ elif isinstance(negative_prompt, str):
325
+ uncond_tokens = [negative_prompt]
326
+ elif batch_size != len(negative_prompt):
327
+ raise ValueError(
328
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
329
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
330
+ " the batch size of `prompt`."
331
+ )
332
+ else:
333
+ uncond_tokens = negative_prompt
334
+
335
+             # textual inversion: process multi-vector tokens if necessary
336
+ if isinstance(self, TextualInversionLoaderMixin):
337
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
338
+
339
+ max_length = prompt_embeds.shape[1]
340
+ uncond_input = self.tokenizer(
341
+ uncond_tokens,
342
+ padding="max_length",
343
+ max_length=max_length,
344
+ truncation=True,
345
+ return_tensors="pt",
346
+ )
347
+
348
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
349
+ attention_mask = uncond_input.attention_mask.to(device)
350
+ else:
351
+ attention_mask = None
352
+
353
+ negative_prompt_embeds = self.text_encoder(
354
+ uncond_input.input_ids.to(device),
355
+ attention_mask=attention_mask,
356
+ )
357
+ negative_prompt_embeds = negative_prompt_embeds[0]
358
+
359
+ if do_classifier_free_guidance:
360
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
361
+ seq_len = negative_prompt_embeds.shape[1]
362
+
363
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
364
+
365
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
366
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
367
+
368
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
369
+ # Retrieve the original scale by scaling back the LoRA layers
370
+ unscale_lora_layers(self.text_encoder, lora_scale)
371
+
372
+ return prompt_embeds, negative_prompt_embeds
373
+
374
+ def run_safety_checker(self, image, device, dtype):
375
+ if self.safety_checker is None:
376
+ has_nsfw_concept = None
377
+ else:
378
+ if torch.is_tensor(image):
379
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
380
+ else:
381
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
382
+ rgb_feature_extractor_input = feature_extractor_input[0]
383
+ safety_checker_input = self.feature_extractor(rgb_feature_extractor_input, return_tensors="pt").to(device)
384
+ image, has_nsfw_concept = self.safety_checker(
385
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
386
+ )
387
+ return image, has_nsfw_concept
388
+
389
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
390
+ def prepare_extra_step_kwargs(self, generator, eta):
391
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
392
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
393
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
394
+ # and should be between [0, 1]
395
+
396
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
397
+ extra_step_kwargs = {}
398
+ if accepts_eta:
399
+ extra_step_kwargs["eta"] = eta
400
+
401
+ # check if the scheduler accepts generator
402
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
403
+ if accepts_generator:
404
+ extra_step_kwargs["generator"] = generator
405
+ return extra_step_kwargs
406
+
407
+ def check_inputs(
408
+ self,
409
+ prompt,
410
+ image,
411
+ noise_level,
412
+ callback_steps,
413
+ negative_prompt=None,
414
+ prompt_embeds=None,
415
+ negative_prompt_embeds=None,
416
+ target_res=None,
417
+ ):
418
+ if (callback_steps is None) or (
419
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
420
+ ):
421
+ raise ValueError(
422
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
423
+ f" {type(callback_steps)}."
424
+ )
425
+
426
+ if prompt is not None and prompt_embeds is not None:
427
+ raise ValueError(
428
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
429
+ " only forward one of the two."
430
+ )
431
+ elif prompt is None and prompt_embeds is None:
432
+ raise ValueError(
433
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
434
+ )
435
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
436
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
437
+
438
+ if negative_prompt is not None and negative_prompt_embeds is not None:
439
+ raise ValueError(
440
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
441
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
442
+ )
443
+
444
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
445
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
446
+ raise ValueError(
447
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
448
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
449
+ f" {negative_prompt_embeds.shape}."
450
+ )
451
+
452
+ if (
453
+ not isinstance(image, torch.Tensor)
454
+ and not isinstance(image, PIL.Image.Image)
455
+ and not isinstance(image, np.ndarray)
456
+ and not isinstance(image, list)
457
+ ):
458
+ raise ValueError(
459
+ f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}"
460
+ )
461
+
462
+ # verify batch size of prompt and image are same if image is a list or tensor or numpy array
463
+ if isinstance(image, list) or isinstance(image, torch.Tensor) or isinstance(image, np.ndarray):
464
+ if prompt is not None and isinstance(prompt, str):
465
+ batch_size = 1
466
+ elif prompt is not None and isinstance(prompt, list):
467
+ batch_size = len(prompt)
468
+ else:
469
+ batch_size = prompt_embeds.shape[0]
470
+
471
+ if isinstance(image, list):
472
+ image_batch_size = len(image)
473
+ else:
474
+ image_batch_size = image.shape[0]
475
+ if batch_size != image_batch_size:
476
+ raise ValueError(
477
+ f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}."
478
+ " Please make sure that passed `prompt` matches the batch size of `image`."
479
+ )
480
+
481
+ # check noise level
482
+ if noise_level > self.config.max_noise_level:
483
+ raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}")
484
+
485
+ if (callback_steps is None) or (
486
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
487
+ ):
488
+ raise ValueError(
489
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
490
+ f" {type(callback_steps)}."
491
+ )
492
+
493
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
494
+ shape = (batch_size, num_channels_latents, height, width)
495
+ if latents is None:
496
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
497
+ else:
498
+ if latents.shape != shape:
499
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
500
+ latents = latents.to(device)
501
+
502
+ # scale the initial noise by the standard deviation required by the scheduler
503
+ latents = latents * self.scheduler.init_noise_sigma
504
+ return latents
505
+
506
+ # def upcast_vae(self):
507
+ # dtype = self.vae.dtype
508
+ # self.vae.to(dtype=torch.float32)
509
+ # use_torch_2_0_or_xformers = isinstance(
510
+ # self.vae.decoder.mid_block.attentions[0].processor,
511
+ # (
512
+ # AttnProcessor2_0,
513
+ # XFormersAttnProcessor,
514
+ # LoRAXFormersAttnProcessor,
515
+ # LoRAAttnProcessor2_0,
516
+ # ),
517
+ # )
518
+ # # if xformers or torch_2_0 is used attention block does not need
519
+ # # to be in float32 which can save lots of memory
520
+ # if use_torch_2_0_or_xformers:
521
+ # self.vae.post_quant_conv.to(dtype)
522
+ # self.vae.decoder.conv_in.to(dtype)
523
+ # self.vae.decoder.mid_block.to(dtype)
524
+
525
+ @torch.no_grad()
526
+ def __call__(
527
+ self,
528
+ prompt: Union[str, List[str]] = None,
529
+ rgb: PipelineImageInput = None,
530
+ depth: PipelineDepthInput = None,
531
+ num_inference_steps: int = 75,
532
+ guidance_scale: float = 9.0,
533
+ noise_level: int = 20,
534
+ negative_prompt: Optional[Union[str, List[str]]] = None,
535
+ num_images_per_prompt: Optional[int] = 1,
536
+ eta: float = 0.0,
537
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
538
+ latents: Optional[torch.FloatTensor] = None,
539
+ prompt_embeds: Optional[torch.FloatTensor] = None,
540
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
541
+ output_type: Optional[str] = "pil",
542
+ return_dict: bool = True,
543
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
544
+ callback_steps: int = 1,
545
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
546
+ target_res: Optional[List[int]] = [1024, 1024],
547
+ ):
548
+ r"""
549
+ The call function to the pipeline for generation.
550
+
551
+ Args:
552
+ prompt (`str` or `List[str]`, *optional*):
553
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
554
+             rgb (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
555
+ `Image` or tensor representing an image batch to be upscaled.
556
+             num_inference_steps (`int`, *optional*, defaults to 75):
557
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
558
+ expense of slower inference.
559
+ guidance_scale (`float`, *optional*, defaults to 5.0):
560
+ A higher guidance scale value encourages the model to generate images closely linked to the text
561
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
562
+ negative_prompt (`str` or `List[str]`, *optional*):
563
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
564
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
565
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
566
+ The number of images to generate per prompt.
567
+ eta (`float`, *optional*, defaults to 0.0):
568
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
569
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
570
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
571
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
572
+ generation deterministic.
573
+ latents (`torch.FloatTensor`, *optional*):
574
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
575
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
576
+ tensor is generated by sampling using the supplied random `generator`.
577
+ prompt_embeds (`torch.FloatTensor`, *optional*):
578
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
579
+ provided, text embeddings are generated from the `prompt` input argument.
580
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
581
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
582
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
583
+ output_type (`str`, *optional*, defaults to `"pil"`):
584
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
585
+ return_dict (`bool`, *optional*, defaults to `True`):
586
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
587
+ plain tuple.
588
+ callback (`Callable`, *optional*):
589
+                 A function that is called every `callback_steps` steps during inference. The function is called with the
590
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
591
+ callback_steps (`int`, *optional*, defaults to 1):
592
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
593
+ every step.
594
+ cross_attention_kwargs (`dict`, *optional*):
595
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
596
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
597
+
598
+ Examples:
599
+
600
+ Returns:
601
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
602
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
603
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
604
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
605
+ "not-safe-for-work" (nsfw) content.
606
+ """
607
+ # 1. Check inputs. Raise error if not correct
608
+ self.check_inputs(
609
+ prompt,
610
+ rgb,
611
+ noise_level,
612
+ callback_steps,
613
+ negative_prompt,
614
+ prompt_embeds,
615
+ negative_prompt_embeds,
616
+ )
617
+ # 2. Define call parameters
618
+ if prompt is not None and isinstance(prompt, str):
619
+ batch_size = 1
620
+ elif prompt is not None and isinstance(prompt, list):
621
+ batch_size = len(prompt)
622
+ else:
623
+ batch_size = prompt_embeds.shape[0]
624
+
625
+ device = self._execution_device
626
+         # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
627
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
628
+ # corresponds to doing no classifier free guidance.
629
+ do_classifier_free_guidance = guidance_scale > 1.0
630
+
631
+ # 3. Encode input prompt
632
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
633
+ prompt,
634
+ device,
635
+ num_images_per_prompt,
636
+ do_classifier_free_guidance,
637
+ negative_prompt,
638
+ prompt_embeds=prompt_embeds,
639
+ negative_prompt_embeds=negative_prompt_embeds,
640
+ )
641
+ # For classifier free guidance, we need to do two forward passes.
642
+ # Here we concatenate the unconditional and text embeddings into a single batch
643
+ # to avoid doing two forward passes
644
+ if do_classifier_free_guidance:
645
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
646
+
647
+ # 4. Preprocess image
648
+ rgb, depth = self.image_processor.preprocess(rgb, depth, target_res=target_res)
649
+ rgb = rgb.to(dtype=prompt_embeds.dtype, device=device)
650
+ depth = depth.to(dtype=prompt_embeds.dtype, device=device)
651
+
652
+ # 5. set timesteps
653
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
654
+ timesteps = self.scheduler.timesteps
655
+
656
+         # 6. Encode the low-resolution image into latent space
657
+ image = torch.cat([rgb, depth], axis=1)
658
+ latent_space_image = self.vae.encode(image).latent_dist.sample(generator)
659
+ latent_space_image *= self.vae.scaling_factor
660
+ noise_level = torch.tensor([noise_level], dtype=torch.long, device=device)
661
+ # noise_rgb = randn_tensor(rgb.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
662
+ # rgb = self.low_res_scheduler.add_noise(rgb, noise_rgb, noise_level)
663
+ # noise_depth = randn_tensor(depth.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
664
+ # depth = self.low_res_scheduler.add_noise(depth, noise_depth, noise_level)
665
+
666
+ batch_multiplier = 2 if do_classifier_free_guidance else 1
667
+ latent_space_image = torch.cat([latent_space_image] * batch_multiplier * num_images_per_prompt)
668
+ noise_level = torch.cat([noise_level] * latent_space_image.shape[0])
669
+
670
+ # 7. Prepare latent variables
671
+ height, width = latent_space_image.shape[2:]
672
+ num_channels_latents = self.vae.config.latent_channels
673
+
674
+ latents = self.prepare_latents(
675
+ batch_size * num_images_per_prompt,
676
+ num_channels_latents,
677
+ height,
678
+ width,
679
+ prompt_embeds.dtype,
680
+ device,
681
+ generator,
682
+ latents,
683
+ )
684
+
685
+ # 8. Check that sizes of image and latents match
686
+ num_channels_image = latent_space_image.shape[1]
687
+ if num_channels_latents + num_channels_image != self.unet.config.in_channels:
688
+ raise ValueError(
689
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
690
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
691
+ f" `num_channels_image`: {num_channels_image} "
692
+ f" = {num_channels_latents+num_channels_image}. Please verify the config of"
693
+ " `pipeline.unet` or your `image` input."
694
+ )
695
+
696
+ # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
697
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
698
+
699
+ # 10. Denoising loop
700
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
701
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
702
+ for i, t in enumerate(timesteps):
703
+ # expand the latents if we are doing classifier free guidance
704
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
705
+
706
+                 # concatenate the noisy latents with the encoded low-resolution image latents in the channel dimension
707
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
708
+ latent_model_input = torch.cat([latent_model_input, latent_space_image], dim=1)
709
+
710
+ # predict the noise residual
711
+ noise_pred = self.unet(
712
+ latent_model_input,
713
+ t,
714
+ encoder_hidden_states=prompt_embeds,
715
+ cross_attention_kwargs=cross_attention_kwargs,
716
+ class_labels=noise_level,
717
+ return_dict=False,
718
+ )[0]
719
+
720
+ # perform guidance
721
+ if do_classifier_free_guidance:
722
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
723
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
724
+
725
+ # compute the previous noisy sample x_t -> x_t-1
726
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
727
+
728
+ # call the callback, if provided
729
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
730
+ progress_bar.update()
731
+ if callback is not None and i % callback_steps == 0:
732
+ callback(i, t, latents)
733
+
734
+ if not output_type == "latent":
735
+ # make sure the VAE is in float32 mode, as it overflows in float16
736
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
737
+
738
+ if needs_upcasting:
739
+ self.upcast_vae()
740
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
741
+
742
+ image = self.vae.decode(latents / self.vae.scaling_factor, return_dict=False)[0]
743
+
744
+ # cast back to fp16 if needed
745
+ if needs_upcasting:
746
+ self.vae.to(dtype=torch.float16)
747
+
748
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
749
+
750
+ else:
751
+ image = latents
752
+ has_nsfw_concept = None
753
+
754
+ if has_nsfw_concept is None:
755
+ do_denormalize = [True] * image.shape[0]
756
+ else:
757
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
758
+
759
+ rgb, depth = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
760
+
761
+ # 11. Apply watermark
762
+ if output_type == "pil" and self.watermarker is not None:
763
+ rgb = self.watermarker.apply_watermark(rgb)
764
+
765
+ # Offload last model to CPU
766
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
767
+ self.final_offload_hook.offload()
768
+
769
+ if not return_dict:
770
+ return ((rgb, depth), has_nsfw_concept)
771
+
772
+ return LDM3DPipelineOutput(rgb=rgb, depth=depth, nsfw_content_detected=has_nsfw_concept)
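A hedged two-stage sketch (illustrative only, not part of the uploaded file) showing how the upscaler above could follow a base LDM3D pipeline. The `Intel/ldm3d-4c` base checkpoint, the float16 dtype, and the assumption that `StableDiffusionUpscaleLDM3DPipeline` is importable from this module are all illustrative choices; `Intel/ldm3d-sr` is the checkpoint named in the docstring example above.

```python
import torch
from diffusers import StableDiffusionLDM3DPipeline  # base RGB + depth pipeline shipped with diffusers

# Stage 1: generate a low-resolution RGB + depth pair (checkpoint name is an assumption).
base = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c", torch_dtype=torch.float16).to("cuda")
low_res = base("a picture of a castle on a hill")

# Stage 2: upscale both modalities with the pipeline defined above.
upscaler = StableDiffusionUpscaleLDM3DPipeline.from_pretrained("Intel/ldm3d-sr", torch_dtype=torch.float16).to("cuda")
out = upscaler(
    prompt="high quality high resolution uhd 4k image",
    rgb=low_res.rgb[0],
    depth=low_res.depth[0],
    num_inference_steps=50,
    target_res=[1024, 1024],
)
out.rgb[0].save("castle_rgb_hr.jpg")
out.depth[0].save("castle_depth_hr.png")
```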
v0.24.0/pipeline_stable_diffusion_xl_controlnet_adapter.py ADDED
@@ -0,0 +1,1463 @@
1
+ # Copyright 2023 TencentARC and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import inspect
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
24
+
25
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
26
+ from diffusers.loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
27
+ from diffusers.models import AutoencoderKL, ControlNetModel, MultiAdapter, T2IAdapter, UNet2DConditionModel
28
+ from diffusers.models.attention_processor import (
29
+ AttnProcessor2_0,
30
+ LoRAAttnProcessor2_0,
31
+ LoRAXFormersAttnProcessor,
32
+ XFormersAttnProcessor,
33
+ )
34
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
35
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
36
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
37
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
38
+ from diffusers.schedulers import KarrasDiffusionSchedulers
39
+ from diffusers.utils import (
40
+ PIL_INTERPOLATION,
41
+ USE_PEFT_BACKEND,
42
+ logging,
43
+ replace_example_docstring,
44
+ scale_lora_layers,
45
+ unscale_lora_layers,
46
+ )
47
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
48
+
49
+
50
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
51
+
52
+ EXAMPLE_DOC_STRING = """
53
+ Examples:
54
+ ```py
55
+ >>> import torch
56
+         >>> from diffusers import ControlNetModel, T2IAdapter, StableDiffusionXLAdapterPipeline, DDPMScheduler
57
+ >>> from diffusers.utils import load_image
58
+ >>> from controlnet_aux.midas import MidasDetector
59
+
60
+ >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
61
+ >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
62
+
63
+ >>> image = load_image(img_url).resize((1024, 1024))
64
+ >>> mask_image = load_image(mask_url).resize((1024, 1024))
65
+
66
+ >>> midas_depth = MidasDetector.from_pretrained(
67
+ ... "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large"
68
+ ... ).to("cuda")
69
+
70
+ >>> depth_image = midas_depth(
71
+ ... image, detect_resolution=512, image_resolution=1024
72
+ ... )
73
+
74
+ >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0"
75
+
76
+ >>> adapter = T2IAdapter.from_pretrained(
77
+ ... "Adapter/t2iadapter",
78
+ ... subfolder="sketch_sdxl_1.0",
79
+ ... torch_dtype=torch.float16,
80
+ ... adapter_type="full_adapter_xl",
81
+ ... )
82
+
83
+ >>> controlnet = ControlNetModel.from_pretrained(
84
+ ... "diffusers/controlnet-depth-sdxl-1.0",
85
+ ... torch_dtype=torch.float16,
86
+ ... variant="fp16",
87
+ ... use_safetensors=True
88
+ ... ).to("cuda")
89
+
90
+ >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler")
91
+
92
+ >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
93
+ ... model_id,
94
+ ... adapter=adapter,
95
+ ... controlnet=controlnet,
96
+ ... torch_dtype=torch.float16,
97
+ ... variant="fp16",
98
+ ... scheduler=scheduler
99
+ ... ).to("cuda")
100
+
101
+ >>> strength = 0.5
102
+
103
+ >>> generator = torch.manual_seed(42)
104
+ >>> sketch_image_out = pipe(
105
+ ... prompt="a photo of a tiger sitting on a park bench",
106
+ ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality",
107
+ ... adapter_image=depth_image,
108
+ ... control_image=mask_image,
109
+ ... adapter_conditioning_scale=strength,
110
+ ... controlnet_conditioning_scale=strength,
111
+ ... generator=generator,
112
+ ... guidance_scale=7.5,
113
+ ... ).images[0]
114
+ ```
115
+ """
116
+
117
+
118
+ def _preprocess_adapter_image(image, height, width):
119
+ if isinstance(image, torch.Tensor):
120
+ return image
121
+ elif isinstance(image, PIL.Image.Image):
122
+ image = [image]
123
+
124
+ if isinstance(image[0], PIL.Image.Image):
125
+ image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
126
+ image = [
127
+ i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
128
+ ] # expand [h, w] or [h, w, c] to [b, h, w, c]
129
+ image = np.concatenate(image, axis=0)
130
+ image = np.array(image).astype(np.float32) / 255.0
131
+ image = image.transpose(0, 3, 1, 2)
132
+ image = torch.from_numpy(image)
133
+ elif isinstance(image[0], torch.Tensor):
134
+ if image[0].ndim == 3:
135
+ image = torch.stack(image, dim=0)
136
+ elif image[0].ndim == 4:
137
+ image = torch.cat(image, dim=0)
138
+ else:
139
+ raise ValueError(
140
+ f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}"
141
+ )
142
+ return image
143
+
144
+
145
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
146
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
147
+ """
148
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
149
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
150
+ """
151
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
152
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
153
+ # rescale the results from guidance (fixes overexposure)
154
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
155
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
156
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
157
+ return noise_cfg
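A brief sketch (illustrative only, not part of the uploaded file) of how `rescale_noise_cfg` defined above is typically applied after classifier-free guidance; the random tensors, the guidance scale of 7.5, and the 0.7 rescale factor are assumptions.

```python
import torch

noise_pred_uncond = torch.randn(2, 4, 64, 64)
noise_pred_text = torch.randn(2, 4, 64, 64)
guidance_scale = 7.5

# standard classifier-free guidance combination
noise_cfg = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# pull the combined prediction's per-sample std back toward the text prediction's std (reduces overexposure)
noise_cfg = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7)
```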
158
+
159
+
160
+ class StableDiffusionXLControlNetAdapterPipeline(
161
+ DiffusionPipeline, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
162
+ ):
163
+ r"""
164
+     Pipeline for text-to-image generation using Stable Diffusion XL augmented with both T2I-Adapter and ControlNet conditioning; T2I-Adapter:
165
+ https://arxiv.org/abs/2302.08453
166
+
167
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
168
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
169
+
170
+ Args:
171
+ adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`):
172
+ Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a
173
+ list, the outputs from each Adapter are added together to create one combined additional conditioning.
174
+ adapter_weights (`List[float]`, *optional*, defaults to None):
175
+             List of floats representing the weight by which each adapter's output will be multiplied before adding them
176
+ together.
177
+ vae ([`AutoencoderKL`]):
178
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
179
+ text_encoder ([`CLIPTextModel`]):
180
+ Frozen text-encoder. Stable Diffusion uses the text portion of
181
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
182
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
183
+ tokenizer (`CLIPTokenizer`):
184
+ Tokenizer of class
185
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
186
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
187
+ scheduler ([`SchedulerMixin`]):
188
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
189
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
190
+ safety_checker ([`StableDiffusionSafetyChecker`]):
191
+ Classification module that estimates whether generated images could be considered offensive or harmful.
192
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
193
+ feature_extractor ([`CLIPFeatureExtractor`]):
194
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
195
+ """
196
+
197
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
198
+ _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"]
199
+
200
+ def __init__(
201
+ self,
202
+ vae: AutoencoderKL,
203
+ text_encoder: CLIPTextModel,
204
+ text_encoder_2: CLIPTextModelWithProjection,
205
+ tokenizer: CLIPTokenizer,
206
+ tokenizer_2: CLIPTokenizer,
207
+ unet: UNet2DConditionModel,
208
+ adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]],
209
+ controlnet: Union[ControlNetModel, MultiControlNetModel],
210
+ scheduler: KarrasDiffusionSchedulers,
211
+ force_zeros_for_empty_prompt: bool = True,
212
+ ):
213
+ super().__init__()
214
+
215
+ if isinstance(controlnet, (list, tuple)):
216
+ controlnet = MultiControlNetModel(controlnet)
217
+
218
+ self.register_modules(
219
+ vae=vae,
220
+ text_encoder=text_encoder,
221
+ text_encoder_2=text_encoder_2,
222
+ tokenizer=tokenizer,
223
+ tokenizer_2=tokenizer_2,
224
+ unet=unet,
225
+ adapter=adapter,
226
+ controlnet=controlnet,
227
+ scheduler=scheduler,
228
+ )
229
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
230
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
231
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
232
+ self.control_image_processor = VaeImageProcessor(
233
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
234
+ )
235
+ self.default_sample_size = self.unet.config.sample_size
236
+
237
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
238
+ def enable_vae_slicing(self):
239
+ r"""
240
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
241
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
242
+ """
243
+ self.vae.enable_slicing()
244
+
245
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
246
+ def disable_vae_slicing(self):
247
+ r"""
248
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
249
+ computing decoding in one step.
250
+ """
251
+ self.vae.disable_slicing()
252
+
253
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
254
+ def enable_vae_tiling(self):
255
+ r"""
256
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
257
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
258
+ processing larger images.
259
+ """
260
+ self.vae.enable_tiling()
261
+
262
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
263
+ def disable_vae_tiling(self):
264
+ r"""
265
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
266
+ computing decoding in one step.
267
+ """
268
+ self.vae.disable_tiling()
269
+
270
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
271
+ def encode_prompt(
272
+ self,
273
+ prompt: str,
274
+ prompt_2: Optional[str] = None,
275
+ device: Optional[torch.device] = None,
276
+ num_images_per_prompt: int = 1,
277
+ do_classifier_free_guidance: bool = True,
278
+ negative_prompt: Optional[str] = None,
279
+ negative_prompt_2: Optional[str] = None,
280
+ prompt_embeds: Optional[torch.FloatTensor] = None,
281
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
282
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
283
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
284
+ lora_scale: Optional[float] = None,
285
+ clip_skip: Optional[int] = None,
286
+ ):
287
+ r"""
288
+ Encodes the prompt into text encoder hidden states.
289
+
290
+ Args:
291
+ prompt (`str` or `List[str]`, *optional*):
292
+ prompt to be encoded
293
+ prompt_2 (`str` or `List[str]`, *optional*):
294
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
295
+ used in both text-encoders
296
+ device: (`torch.device`):
297
+ torch device
298
+ num_images_per_prompt (`int`):
299
+ number of images that should be generated per prompt
300
+ do_classifier_free_guidance (`bool`):
301
+ whether to use classifier free guidance or not
302
+ negative_prompt (`str` or `List[str]`, *optional*):
303
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
304
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
305
+ less than `1`).
306
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
307
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
308
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
309
+ prompt_embeds (`torch.FloatTensor`, *optional*):
310
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
311
+ provided, text embeddings will be generated from `prompt` input argument.
312
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
313
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
314
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
315
+ argument.
316
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
317
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
318
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
319
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
320
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
321
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
322
+ input argument.
323
+ lora_scale (`float`, *optional*):
324
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
325
+ clip_skip (`int`, *optional*):
326
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
327
+ the output of the pre-final layer will be used for computing the prompt embeddings.
328
+ """
329
+ device = device or self._execution_device
330
+
331
+ # set lora scale so that monkey patched LoRA
332
+ # function of text encoder can correctly access it
333
+ if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
334
+ self._lora_scale = lora_scale
335
+
336
+ # dynamically adjust the LoRA scale
337
+ if self.text_encoder is not None:
338
+ if not USE_PEFT_BACKEND:
339
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
340
+ else:
341
+ scale_lora_layers(self.text_encoder, lora_scale)
342
+
343
+ if self.text_encoder_2 is not None:
344
+ if not USE_PEFT_BACKEND:
345
+ adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
346
+ else:
347
+ scale_lora_layers(self.text_encoder_2, lora_scale)
348
+
349
+ prompt = [prompt] if isinstance(prompt, str) else prompt
350
+
351
+ if prompt is not None:
352
+ batch_size = len(prompt)
353
+ else:
354
+ batch_size = prompt_embeds.shape[0]
355
+
356
+ # Define tokenizers and text encoders
357
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
358
+ text_encoders = (
359
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
360
+ )
361
+
362
+ if prompt_embeds is None:
363
+ prompt_2 = prompt_2 or prompt
364
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
365
+
366
+             # textual inversion: process multi-vector tokens if necessary
367
+ prompt_embeds_list = []
368
+ prompts = [prompt, prompt_2]
369
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
370
+ if isinstance(self, TextualInversionLoaderMixin):
371
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
372
+
373
+ text_inputs = tokenizer(
374
+ prompt,
375
+ padding="max_length",
376
+ max_length=tokenizer.model_max_length,
377
+ truncation=True,
378
+ return_tensors="pt",
379
+ )
380
+
381
+ text_input_ids = text_inputs.input_ids
382
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
383
+
384
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
385
+ text_input_ids, untruncated_ids
386
+ ):
387
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
388
+ logger.warning(
389
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
390
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
391
+ )
392
+
393
+ prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
394
+
395
+ # We are only ALWAYS interested in the pooled output of the final text encoder
396
+ pooled_prompt_embeds = prompt_embeds[0]
397
+ if clip_skip is None:
398
+ prompt_embeds = prompt_embeds.hidden_states[-2]
399
+ else:
400
+ # "2" because SDXL always indexes from the penultimate layer.
401
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
402
+
403
+ prompt_embeds_list.append(prompt_embeds)
404
+
405
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
406
+
407
+ # get unconditional embeddings for classifier free guidance
408
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
409
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
410
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
411
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
412
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
413
+ negative_prompt = negative_prompt or ""
414
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
415
+
416
+ # normalize str to list
417
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
418
+ negative_prompt_2 = (
419
+ batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
420
+ )
421
+
422
+ uncond_tokens: List[str]
423
+ if prompt is not None and type(prompt) is not type(negative_prompt):
424
+ raise TypeError(
425
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
426
+ f" {type(prompt)}."
427
+ )
428
+ elif batch_size != len(negative_prompt):
429
+ raise ValueError(
430
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
431
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
432
+ " the batch size of `prompt`."
433
+ )
434
+ else:
435
+ uncond_tokens = [negative_prompt, negative_prompt_2]
436
+
437
+ negative_prompt_embeds_list = []
438
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
439
+ if isinstance(self, TextualInversionLoaderMixin):
440
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
441
+
442
+ max_length = prompt_embeds.shape[1]
443
+ uncond_input = tokenizer(
444
+ negative_prompt,
445
+ padding="max_length",
446
+ max_length=max_length,
447
+ truncation=True,
448
+ return_tensors="pt",
449
+ )
450
+
451
+ negative_prompt_embeds = text_encoder(
452
+ uncond_input.input_ids.to(device),
453
+ output_hidden_states=True,
454
+ )
455
+ # We are only ALWAYS interested in the pooled output of the final text encoder
456
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
457
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
458
+
459
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
460
+
461
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
462
+
463
+ if self.text_encoder_2 is not None:
464
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
465
+ else:
466
+ prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
467
+
468
+ bs_embed, seq_len, _ = prompt_embeds.shape
469
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
470
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
471
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
472
+
473
+ if do_classifier_free_guidance:
474
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
475
+ seq_len = negative_prompt_embeds.shape[1]
476
+
477
+ if self.text_encoder_2 is not None:
478
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
479
+ else:
480
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
481
+
482
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
483
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
484
+
485
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
486
+ bs_embed * num_images_per_prompt, -1
487
+ )
488
+ if do_classifier_free_guidance:
489
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
490
+ bs_embed * num_images_per_prompt, -1
491
+ )
492
+
493
+ if self.text_encoder is not None:
494
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
495
+ # Retrieve the original scale by scaling back the LoRA layers
496
+ unscale_lora_layers(self.text_encoder, lora_scale)
497
+
498
+ if self.text_encoder_2 is not None:
499
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
500
+ # Retrieve the original scale by scaling back the LoRA layers
501
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
502
+
503
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
504
+
505
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
506
+ def prepare_extra_step_kwargs(self, generator, eta):
507
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
508
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
509
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
510
+ # and should be between [0, 1]
511
+
512
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
513
+ extra_step_kwargs = {}
514
+ if accepts_eta:
515
+ extra_step_kwargs["eta"] = eta
516
+
517
+ # check if the scheduler accepts generator
518
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
519
+ if accepts_generator:
520
+ extra_step_kwargs["generator"] = generator
521
+ return extra_step_kwargs
522
+
523
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
524
+ def check_image(self, image, prompt, prompt_embeds):
525
+ image_is_pil = isinstance(image, PIL.Image.Image)
526
+ image_is_tensor = isinstance(image, torch.Tensor)
527
+ image_is_np = isinstance(image, np.ndarray)
528
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
529
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
530
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
531
+
532
+ if (
533
+ not image_is_pil
534
+ and not image_is_tensor
535
+ and not image_is_np
536
+ and not image_is_pil_list
537
+ and not image_is_tensor_list
538
+ and not image_is_np_list
539
+ ):
540
+ raise TypeError(
541
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
542
+ )
543
+
544
+ if image_is_pil:
545
+ image_batch_size = 1
546
+ else:
547
+ image_batch_size = len(image)
548
+
549
+ if prompt is not None and isinstance(prompt, str):
550
+ prompt_batch_size = 1
551
+ elif prompt is not None and isinstance(prompt, list):
552
+ prompt_batch_size = len(prompt)
553
+ elif prompt_embeds is not None:
554
+ prompt_batch_size = prompt_embeds.shape[0]
555
+
556
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
557
+ raise ValueError(
558
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
559
+ )
560
+
561
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs
562
+ def check_inputs(
563
+ self,
564
+ prompt,
565
+ prompt_2,
566
+ height,
567
+ width,
568
+ callback_steps,
569
+ negative_prompt=None,
570
+ negative_prompt_2=None,
571
+ prompt_embeds=None,
572
+ negative_prompt_embeds=None,
573
+ pooled_prompt_embeds=None,
574
+ negative_pooled_prompt_embeds=None,
575
+ callback_on_step_end_tensor_inputs=None,
576
+ ):
577
+ if height % 8 != 0 or width % 8 != 0:
578
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
579
+
580
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
581
+ raise ValueError(
582
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
583
+ f" {type(callback_steps)}."
584
+ )
585
+
586
+ if callback_on_step_end_tensor_inputs is not None and not all(
587
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
588
+ ):
589
+ raise ValueError(
590
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
591
+ )
592
+
593
+ if prompt is not None and prompt_embeds is not None:
594
+ raise ValueError(
595
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
596
+ " only forward one of the two."
597
+ )
598
+ elif prompt_2 is not None and prompt_embeds is not None:
599
+ raise ValueError(
600
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
601
+ " only forward one of the two."
602
+ )
603
+ elif prompt is None and prompt_embeds is None:
604
+ raise ValueError(
605
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
606
+ )
607
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
608
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
609
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
610
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
611
+
612
+ if negative_prompt is not None and negative_prompt_embeds is not None:
613
+ raise ValueError(
614
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
615
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
616
+ )
617
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
618
+ raise ValueError(
619
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
620
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
621
+ )
622
+
623
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
624
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
625
+ raise ValueError(
626
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
627
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
628
+ f" {negative_prompt_embeds.shape}."
629
+ )
630
+
631
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
632
+ raise ValueError(
633
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
634
+ )
635
+
636
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
637
+ raise ValueError(
638
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
639
+ )
640
+
641
+ def check_conditions(
642
+ self,
643
+ prompt,
644
+ prompt_embeds,
645
+ adapter_image,
646
+ control_image,
647
+ adapter_conditioning_scale,
648
+ controlnet_conditioning_scale,
649
+ control_guidance_start,
650
+ control_guidance_end,
651
+ ):
652
+ # controlnet checks
653
+ if not isinstance(control_guidance_start, (tuple, list)):
654
+ control_guidance_start = [control_guidance_start]
655
+
656
+ if not isinstance(control_guidance_end, (tuple, list)):
657
+ control_guidance_end = [control_guidance_end]
658
+
659
+ if len(control_guidance_start) != len(control_guidance_end):
660
+ raise ValueError(
661
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
662
+ )
663
+
664
+ if isinstance(self.controlnet, MultiControlNetModel):
665
+ if len(control_guidance_start) != len(self.controlnet.nets):
666
+ raise ValueError(
667
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
668
+ )
669
+
670
+ for start, end in zip(control_guidance_start, control_guidance_end):
671
+ if start >= end:
672
+ raise ValueError(
673
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
674
+ )
675
+ if start < 0.0:
676
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
677
+ if end > 1.0:
678
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
679
+
680
+ # Check controlnet `image`
681
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
682
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
683
+ )
684
+ if (
685
+ isinstance(self.controlnet, ControlNetModel)
686
+ or is_compiled
687
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
688
+ ):
689
+ self.check_image(control_image, prompt, prompt_embeds)
690
+ elif (
691
+ isinstance(self.controlnet, MultiControlNetModel)
692
+ or is_compiled
693
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
694
+ ):
695
+ if not isinstance(control_image, list):
696
+ raise TypeError("For multiple controlnets: `control_image` must be type `list`")
697
+
698
+ # When `image` is a nested list:
699
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
700
+ elif any(isinstance(i, list) for i in control_image):
701
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
702
+ elif len(control_image) != len(self.controlnet.nets):
703
+ raise ValueError(
704
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(control_image)} images and {len(self.controlnet.nets)} ControlNets."
705
+ )
706
+
707
+ for image_ in control_image:
708
+ self.check_image(image_, prompt, prompt_embeds)
709
+ else:
710
+ assert False
711
+
712
+ # Check `controlnet_conditioning_scale`
713
+ if (
714
+ isinstance(self.controlnet, ControlNetModel)
715
+ or is_compiled
716
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
717
+ ):
718
+ if not isinstance(controlnet_conditioning_scale, float):
719
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
720
+ elif (
721
+ isinstance(self.controlnet, MultiControlNetModel)
722
+ or is_compiled
723
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
724
+ ):
725
+ if isinstance(controlnet_conditioning_scale, list):
726
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
727
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
728
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
729
+ self.controlnet.nets
730
+ ):
731
+ raise ValueError(
732
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
733
+ " the same length as the number of controlnets"
734
+ )
735
+ else:
736
+ assert False
737
+
738
+ # adapter checks
739
+ if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
740
+ self.check_image(adapter_image, prompt, prompt_embeds)
741
+ elif (
742
+ isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
743
+ ):
744
+ if not isinstance(adapter_image, list):
745
+ raise TypeError("For multiple adapters: `adapter_image` must be type `list`")
746
+
747
+ # When `image` is a nested list:
748
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
749
+ elif any(isinstance(i, list) for i in adapter_image):
750
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
751
+ elif len(adapter_image) != len(self.adapter.adapters):
752
+ raise ValueError(
753
+ f"For multiple adapters: `image` must have the same length as the number of adapters, but got {len(adapter_image)} images and {len(self.adapters.nets)} Adapters."
754
+ )
755
+
756
+ for image_ in adapter_image:
757
+ self.check_image(image_, prompt, prompt_embeds)
758
+ else:
759
+ assert False
760
+
761
+ # Check `adapter_conditioning_scale`
762
+ if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
763
+ if not isinstance(adapter_conditioning_scale, float):
764
+ raise TypeError("For single adapter: `adapter_conditioning_scale` must be type `float`.")
765
+ elif (
766
+ isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
767
+ ):
768
+ if isinstance(adapter_conditioning_scale, list):
769
+ if any(isinstance(i, list) for i in adapter_conditioning_scale):
770
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
771
+ elif isinstance(adapter_conditioning_scale, list) and len(adapter_conditioning_scale) != len(
772
+ self.adapter.adapters
773
+ ):
774
+ raise ValueError(
775
+ "For multiple adapters: When `adapter_conditioning_scale` is specified as `list`, it must have"
776
+ " the same length as the number of adapters"
777
+ )
778
+ else:
779
+ assert False
780
+
781
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
782
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
783
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
784
+ if isinstance(generator, list) and len(generator) != batch_size:
785
+ raise ValueError(
786
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
787
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
788
+ )
789
+
790
+ if latents is None:
791
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
792
+ else:
793
+ latents = latents.to(device)
794
+
795
+ # scale the initial noise by the standard deviation required by the scheduler
796
+ latents = latents * self.scheduler.init_noise_sigma
797
+ return latents
798
+
799
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
800
+ def _get_add_time_ids(
801
+ self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
802
+ ):
803
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
804
+
805
+ passed_add_embed_dim = (
806
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
807
+ )
808
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
809
+
810
+ if expected_add_embed_dim != passed_add_embed_dim:
811
+ raise ValueError(
812
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
813
+ )
814
+
815
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
816
+ return add_time_ids
817
+
818
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
819
+ def upcast_vae(self):
820
+ dtype = self.vae.dtype
821
+ self.vae.to(dtype=torch.float32)
822
+ use_torch_2_0_or_xformers = isinstance(
823
+ self.vae.decoder.mid_block.attentions[0].processor,
824
+ (
825
+ AttnProcessor2_0,
826
+ XFormersAttnProcessor,
827
+ LoRAXFormersAttnProcessor,
828
+ LoRAAttnProcessor2_0,
829
+ ),
830
+ )
831
+ # if xformers or torch_2_0 is used attention block does not need
832
+ # to be in float32 which can save lots of memory
833
+ if use_torch_2_0_or_xformers:
834
+ self.vae.post_quant_conv.to(dtype)
835
+ self.vae.decoder.conv_in.to(dtype)
836
+ self.vae.decoder.mid_block.to(dtype)
837
+
838
+ # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
839
+ def _default_height_width(self, height, width, image):
840
+ # NOTE: It is possible that a list of images have different
841
+ # dimensions for each image, so just checking the first image
842
+ # is not _exactly_ correct, but it is simple.
843
+ while isinstance(image, list):
844
+ image = image[0]
845
+
846
+ if height is None:
847
+ if isinstance(image, PIL.Image.Image):
848
+ height = image.height
849
+ elif isinstance(image, torch.Tensor):
850
+ height = image.shape[-2]
851
+
852
+ # round down to nearest multiple of `self.adapter.downscale_factor`
853
+ height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor
854
+
855
+ if width is None:
856
+ if isinstance(image, PIL.Image.Image):
857
+ width = image.width
858
+ elif isinstance(image, torch.Tensor):
859
+ width = image.shape[-1]
860
+
861
+ # round down to nearest multiple of `self.adapter.downscale_factor`
862
+ width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor
863
+
864
+ return height, width
865
+
866
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
867
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
868
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
869
+
870
+ The suffixes after the scaling factors represent the stages where they are being applied.
871
+
872
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
873
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
874
+
875
+ Args:
876
+ s1 (`float`):
877
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
878
+ mitigate "oversmoothing effect" in the enhanced denoising process.
879
+ s2 (`float`):
880
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
881
+ mitigate "oversmoothing effect" in the enhanced denoising process.
882
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
883
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
884
+ """
885
+ if not hasattr(self, "unet"):
886
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
887
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
888
+
889
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
890
+ def disable_freeu(self):
891
+ """Disables the FreeU mechanism if enabled."""
892
+ self.unet.disable_freeu()
893
+
894
+ def prepare_control_image(
895
+ self,
896
+ image,
897
+ width,
898
+ height,
899
+ batch_size,
900
+ num_images_per_prompt,
901
+ device,
902
+ dtype,
903
+ do_classifier_free_guidance=False,
904
+ guess_mode=False,
905
+ ):
906
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
907
+ image_batch_size = image.shape[0]
908
+
909
+ if image_batch_size == 1:
910
+ repeat_by = batch_size
911
+ else:
912
+ # image batch size is the same as prompt batch size
913
+ repeat_by = num_images_per_prompt
914
+
915
+ image = image.repeat_interleave(repeat_by, dim=0)
916
+
917
+ image = image.to(device=device, dtype=dtype)
918
+
919
+ if do_classifier_free_guidance and not guess_mode:
920
+ image = torch.cat([image] * 2)
921
+
922
+ return image
923
+
924
+ @torch.no_grad()
925
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
926
+ def __call__(
927
+ self,
928
+ prompt: Union[str, List[str]] = None,
929
+ prompt_2: Optional[Union[str, List[str]]] = None,
930
+ adapter_image: PipelineImageInput = None,
931
+ control_image: PipelineImageInput = None,
932
+ height: Optional[int] = None,
933
+ width: Optional[int] = None,
934
+ num_inference_steps: int = 50,
935
+ denoising_end: Optional[float] = None,
936
+ guidance_scale: float = 5.0,
937
+ negative_prompt: Optional[Union[str, List[str]]] = None,
938
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
939
+ num_images_per_prompt: Optional[int] = 1,
940
+ eta: float = 0.0,
941
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
942
+ latents: Optional[torch.FloatTensor] = None,
943
+ prompt_embeds: Optional[torch.FloatTensor] = None,
944
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
945
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
946
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
947
+ output_type: Optional[str] = "pil",
948
+ return_dict: bool = True,
949
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
950
+ callback_steps: int = 1,
951
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
952
+ guidance_rescale: float = 0.0,
953
+ original_size: Optional[Tuple[int, int]] = None,
954
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
955
+ target_size: Optional[Tuple[int, int]] = None,
956
+ negative_original_size: Optional[Tuple[int, int]] = None,
957
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
958
+ negative_target_size: Optional[Tuple[int, int]] = None,
959
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
960
+ adapter_conditioning_factor: float = 1.0,
961
+ clip_skip: Optional[int] = None,
962
+ controlnet_conditioning_scale=1.0,
963
+ guess_mode: bool = False,
964
+ control_guidance_start: float = 0.0,
965
+ control_guidance_end: float = 1.0,
966
+ ):
967
+ r"""
968
+ Function invoked when calling the pipeline for generation.
969
+
970
+ Args:
971
+ prompt (`str` or `List[str]`, *optional*):
972
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
973
+ instead.
974
+ prompt_2 (`str` or `List[str]`, *optional*):
975
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
976
+ used in both text-encoders
977
+ adapter_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`):
978
+ The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the
979
+ type is specified as `torch.FloatTensor`, it is passed to Adapter as is. `PIL.Image.Image` can also be
980
+ accepted as an image. The control image is automatically resized to fit the output image.
981
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
982
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
983
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
984
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
985
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
986
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
987
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
988
+ input to a single ControlNet.
989
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
990
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
991
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
992
+ and checkpoints that are not specifically fine-tuned on low resolutions.
993
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
994
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
995
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
996
+ and checkpoints that are not specifically fine-tuned on low resolutions.
997
+ num_inference_steps (`int`, *optional*, defaults to 50):
998
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
999
+ expense of slower inference.
1000
+ denoising_end (`float`, *optional*):
1001
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
1002
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
1003
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
1004
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
1005
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1006
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
1007
+ guidance_scale (`float`, *optional*, defaults to 5.0):
1008
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1009
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1010
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1011
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1012
+ usually at the expense of lower image quality.
1013
+ negative_prompt (`str` or `List[str]`, *optional*):
1014
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1015
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1016
+ less than `1`).
1017
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
1018
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
1019
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
1020
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1021
+ The number of images to generate per prompt.
1022
+ eta (`float`, *optional*, defaults to 0.0):
1023
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1024
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1025
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1026
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1027
+ to make generation deterministic.
1028
+ latents (`torch.FloatTensor`, *optional*):
1029
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1030
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1031
+ tensor will be generated by sampling using the supplied random `generator`.
1032
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1033
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1034
+ provided, text embeddings will be generated from `prompt` input argument.
1035
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1036
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1037
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1038
+ argument.
1039
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1040
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1041
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1042
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1043
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1044
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1045
+ input argument.
1046
+ output_type (`str`, *optional*, defaults to `"pil"`):
1047
+ The output format of the generated image. Choose between
1048
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1049
+ return_dict (`bool`, *optional*, defaults to `True`):
1050
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`]
1051
+ instead of a plain tuple.
1052
+ callback (`Callable`, *optional*):
1053
+ A function that will be called every `callback_steps` steps during inference. The function will be
1054
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1055
+ callback_steps (`int`, *optional*, defaults to 1):
1056
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1057
+ called at every step.
1058
+ cross_attention_kwargs (`dict`, *optional*):
1059
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1060
+ `self.processor` in
1061
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1062
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
1063
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
1064
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
1065
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
1066
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
1067
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1068
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1069
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1070
+ explained in section 2.2 of
1071
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1072
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1073
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1074
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1075
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1076
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1077
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1078
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1079
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1080
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1082
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1083
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
1084
+ micro-conditioning as explained in section 2.2 of
1085
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1086
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1087
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1088
+ To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
1089
+ micro-conditioning as explained in section 2.2 of
1090
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1091
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1092
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1093
+ To negatively condition the generation process based on a target image resolution. It should be the same
1094
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
1095
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1096
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1097
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1098
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the
1099
+ residual in the original unet. If multiple ControlNets are specified in init, you can set the
1100
+ corresponding scale as a list.
1101
+ adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1102
+ The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the
1103
+ residual in the original unet. If multiple adapters are specified in init, you can set the
1104
+ corresponding scale as a list.
1105
+ adapter_conditioning_factor (`float`, *optional*, defaults to 1.0):
1106
+ The fraction of timesteps for which adapter should be applied. If `adapter_conditioning_factor` is
1107
+ `0.0`, adapter is not applied at all. If `adapter_conditioning_factor` is `1.0`, adapter is applied for
1108
+ all timesteps. If `adapter_conditioning_factor` is `0.5`, adapter is applied for half of the timesteps.
1109
+ clip_skip (`int`, *optional*):
1110
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1111
+ the output of the pre-final layer will be used for computing the prompt embeddings.
1112
+
1113
+ Examples:
1114
+
1115
+ Returns:
1116
+ [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`:
1117
+ [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a
1118
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
1119
+ """
1120
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1121
+ adapter = self.adapter._orig_mod if is_compiled_module(self.adapter) else self.adapter
1122
+
1123
+ # 0. Default height and width to unet
1124
+
1125
+ height, width = self._default_height_width(height, width, adapter_image)
1126
+ device = self._execution_device
1127
+
1128
+ if isinstance(adapter, MultiAdapter):
1129
+ adapter_input = []
1130
+
1131
+ for one_image in adapter_image:
1132
+ one_image = _preprocess_adapter_image(one_image, height, width)
1133
+ one_image = one_image.to(device=device, dtype=adapter.dtype)
1134
+ adapter_input.append(one_image)
1135
+ else:
1136
+ adapter_input = _preprocess_adapter_image(adapter_image, height, width)
1137
+ adapter_input = adapter_input.to(device=device, dtype=adapter.dtype)
1138
+ original_size = original_size or (height, width)
1139
+ target_size = target_size or (height, width)
1140
+
1141
+ # 0.1 align format for control guidance
1142
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1143
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1144
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1145
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1146
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1147
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1148
+ control_guidance_start, control_guidance_end = (
1149
+ mult * [control_guidance_start],
1150
+ mult * [control_guidance_end],
1151
+ )
1152
+
1153
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1154
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1155
+ if isinstance(adapter, MultiAdapter) and isinstance(adapter_conditioning_scale, float):
1156
+ adapter_conditioning_scale = [adapter_conditioning_scale] * len(adapter.adapters)
1157
+
1158
+ # 1. Check inputs. Raise error if not correct
1159
+ self.check_inputs(
1160
+ prompt,
1161
+ prompt_2,
1162
+ height,
1163
+ width,
1164
+ callback_steps,
1165
+ negative_prompt=negative_prompt,
1166
+ negative_prompt_2=negative_prompt_2,
1167
+ prompt_embeds=prompt_embeds,
1168
+ negative_prompt_embeds=negative_prompt_embeds,
1169
+ pooled_prompt_embeds=pooled_prompt_embeds,
1170
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1171
+ )
1172
+
1173
+ self.check_conditions(
1174
+ prompt,
1175
+ prompt_embeds,
1176
+ adapter_image,
1177
+ control_image,
1178
+ adapter_conditioning_scale,
1179
+ controlnet_conditioning_scale,
1180
+ control_guidance_start,
1181
+ control_guidance_end,
1182
+ )
1183
+
1184
+ # 2. Define call parameters
1185
+ if prompt is not None and isinstance(prompt, str):
1186
+ batch_size = 1
1187
+ elif prompt is not None and isinstance(prompt, list):
1188
+ batch_size = len(prompt)
1189
+ else:
1190
+ batch_size = prompt_embeds.shape[0]
1191
+
1192
+ device = self._execution_device
1193
+
1194
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1195
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1196
+ # corresponds to doing no classifier free guidance.
1197
+ do_classifier_free_guidance = guidance_scale > 1.0
1198
+
1199
+ # 3. Encode input prompt
1200
+ (
1201
+ prompt_embeds,
1202
+ negative_prompt_embeds,
1203
+ pooled_prompt_embeds,
1204
+ negative_pooled_prompt_embeds,
1205
+ ) = self.encode_prompt(
1206
+ prompt=prompt,
1207
+ prompt_2=prompt_2,
1208
+ device=device,
1209
+ num_images_per_prompt=num_images_per_prompt,
1210
+ do_classifier_free_guidance=do_classifier_free_guidance,
1211
+ negative_prompt=negative_prompt,
1212
+ negative_prompt_2=negative_prompt_2,
1213
+ prompt_embeds=prompt_embeds,
1214
+ negative_prompt_embeds=negative_prompt_embeds,
1215
+ pooled_prompt_embeds=pooled_prompt_embeds,
1216
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1217
+ clip_skip=clip_skip,
1218
+ )
1219
+
1220
+ # 4. Prepare timesteps
1221
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1222
+
1223
+ timesteps = self.scheduler.timesteps
1224
+
1225
+ # 5. Prepare latent variables
1226
+ num_channels_latents = self.unet.config.in_channels
1227
+ latents = self.prepare_latents(
1228
+ batch_size * num_images_per_prompt,
1229
+ num_channels_latents,
1230
+ height,
1231
+ width,
1232
+ prompt_embeds.dtype,
1233
+ device,
1234
+ generator,
1235
+ latents,
1236
+ )
1237
+
1238
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1239
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1240
+
1241
+ # 7. Prepare added time ids & embeddings & adapter features
1242
+ if isinstance(adapter, MultiAdapter):
1243
+ adapter_state = adapter(adapter_input, adapter_conditioning_scale)
1244
+ for k, v in enumerate(adapter_state):
1245
+ adapter_state[k] = v
1246
+ else:
1247
+ adapter_state = adapter(adapter_input)
1248
+ for k, v in enumerate(adapter_state):
1249
+ adapter_state[k] = v * adapter_conditioning_scale
1250
+ if num_images_per_prompt > 1:
1251
+ for k, v in enumerate(adapter_state):
1252
+ adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
1253
+ if do_classifier_free_guidance:
1254
+ for k, v in enumerate(adapter_state):
1255
+ adapter_state[k] = torch.cat([v] * 2, dim=0)
1256
+
1257
+ # 7.2 Prepare control images
1258
+ if isinstance(controlnet, ControlNetModel):
1259
+ control_image = self.prepare_control_image(
1260
+ image=control_image,
1261
+ width=width,
1262
+ height=height,
1263
+ batch_size=batch_size * num_images_per_prompt,
1264
+ num_images_per_prompt=num_images_per_prompt,
1265
+ device=device,
1266
+ dtype=controlnet.dtype,
1267
+ do_classifier_free_guidance=do_classifier_free_guidance,
1268
+ guess_mode=guess_mode,
1269
+ )
1270
+ elif isinstance(controlnet, MultiControlNetModel):
1271
+ control_images = []
1272
+
1273
+ for control_image_ in control_image:
1274
+ control_image_ = self.prepare_control_image(
1275
+ image=control_image_,
1276
+ width=width,
1277
+ height=height,
1278
+ batch_size=batch_size * num_images_per_prompt,
1279
+ num_images_per_prompt=num_images_per_prompt,
1280
+ device=device,
1281
+ dtype=controlnet.dtype,
1282
+ do_classifier_free_guidance=do_classifier_free_guidance,
1283
+ guess_mode=guess_mode,
1284
+ )
1285
+
1286
+ control_images.append(control_image_)
1287
+
1288
+ control_image = control_images
1289
+ else:
1290
+ raise ValueError(f"{controlnet.__class__} is not supported.")
1291
+
1292
+ # 8.2 Create tensor stating which controlnets to keep
1293
+ controlnet_keep = []
1294
+ for i in range(len(timesteps)):
1295
+ keeps = [
1296
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1297
+ for s, e in zip(control_guidance_start, control_guidance_end)
1298
+ ]
1299
+ if isinstance(self.controlnet, MultiControlNetModel):
1300
+ controlnet_keep.append(keeps)
1301
+ else:
1302
+ controlnet_keep.append(keeps[0])
1303
+
1304
+ add_text_embeds = pooled_prompt_embeds
1305
+ if self.text_encoder_2 is None:
1306
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1307
+ else:
1308
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1309
+
1310
+ add_time_ids = self._get_add_time_ids(
1311
+ original_size,
1312
+ crops_coords_top_left,
1313
+ target_size,
1314
+ dtype=prompt_embeds.dtype,
1315
+ text_encoder_projection_dim=text_encoder_projection_dim,
1316
+ )
1317
+ if negative_original_size is not None and negative_target_size is not None:
1318
+ negative_add_time_ids = self._get_add_time_ids(
1319
+ negative_original_size,
1320
+ negative_crops_coords_top_left,
1321
+ negative_target_size,
1322
+ dtype=prompt_embeds.dtype,
1323
+ text_encoder_projection_dim=text_encoder_projection_dim,
1324
+ )
1325
+ else:
1326
+ negative_add_time_ids = add_time_ids
1327
+
1328
+ if do_classifier_free_guidance:
1329
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1330
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1331
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
1332
+
1333
+ prompt_embeds = prompt_embeds.to(device)
1334
+ add_text_embeds = add_text_embeds.to(device)
1335
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1336
+
1337
+ # 8. Denoising loop
1338
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1339
+
1340
+ # 7.1 Apply denoising_end
1341
+ if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
1342
+ discrete_timestep_cutoff = int(
1343
+ round(
1344
+ self.scheduler.config.num_train_timesteps
1345
+ - (denoising_end * self.scheduler.config.num_train_timesteps)
1346
+ )
1347
+ )
1348
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1349
+ timesteps = timesteps[:num_inference_steps]
1350
+
1351
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1352
+ for i, t in enumerate(timesteps):
1353
+ # expand the latents if we are doing classifier free guidance
1354
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1355
+
1356
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1357
+
1358
+ # predict the noise residual
1359
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1360
+
1361
+ if i < int(num_inference_steps * adapter_conditioning_factor):
1362
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
1363
+ else:
1364
+ down_intrablock_additional_residuals = None
1365
+
1366
+ # ----------- ControlNet
1367
+
1368
+ # expand the latents if we are doing classifier free guidance
1369
+ latent_model_input_controlnet = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1370
+
1371
+ # concat latents, mask, masked_image_latents in the channel dimension
1372
+ latent_model_input_controlnet = self.scheduler.scale_model_input(latent_model_input_controlnet, t)
1373
+
1374
+ # controlnet(s) inference
1375
+ if guess_mode and do_classifier_free_guidance:
1376
+ # Infer ControlNet only for the conditional batch.
1377
+ control_model_input = latents
1378
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1379
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1380
+ controlnet_added_cond_kwargs = {
1381
+ "text_embeds": add_text_embeds.chunk(2)[1],
1382
+ "time_ids": add_time_ids.chunk(2)[1],
1383
+ }
1384
+ else:
1385
+ control_model_input = latent_model_input_controlnet
1386
+ controlnet_prompt_embeds = prompt_embeds
1387
+ controlnet_added_cond_kwargs = added_cond_kwargs
1388
+
1389
+ if isinstance(controlnet_keep[i], list):
1390
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1391
+ else:
1392
+ controlnet_cond_scale = controlnet_conditioning_scale
1393
+ if isinstance(controlnet_cond_scale, list):
1394
+ controlnet_cond_scale = controlnet_cond_scale[0]
1395
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1396
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1397
+ control_model_input,
1398
+ t,
1399
+ encoder_hidden_states=controlnet_prompt_embeds,
1400
+ controlnet_cond=control_image,
1401
+ conditioning_scale=cond_scale,
1402
+ guess_mode=guess_mode,
1403
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1404
+ return_dict=False,
1405
+ )
1406
+
1407
+ noise_pred = self.unet(
1408
+ latent_model_input,
1409
+ t,
1410
+ encoder_hidden_states=prompt_embeds,
1411
+ cross_attention_kwargs=cross_attention_kwargs,
1412
+ added_cond_kwargs=added_cond_kwargs,
1413
+ return_dict=False,
1414
+ down_intrablock_additional_residuals=down_intrablock_additional_residuals, # t2iadapter
1415
+ down_block_additional_residuals=down_block_res_samples, # controlnet
1416
+ mid_block_additional_residual=mid_block_res_sample, # controlnet
1417
+ )[0]
1418
+
1419
+ # perform guidance
1420
+ if do_classifier_free_guidance:
1421
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1422
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1423
+
1424
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1425
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1426
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1427
+
1428
+ # compute the previous noisy sample x_t -> x_t-1
1429
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1430
+
1431
+ # call the callback, if provided
1432
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1433
+ progress_bar.update()
1434
+ if callback is not None and i % callback_steps == 0:
1435
+ step_idx = i // getattr(self.scheduler, "order", 1)
1436
+ callback(step_idx, t, latents)
1437
+
1438
+ if not output_type == "latent":
1439
+ # make sure the VAE is in float32 mode, as it overflows in float16
1440
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1441
+
1442
+ if needs_upcasting:
1443
+ self.upcast_vae()
1444
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1445
+
1446
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1447
+
1448
+ # cast back to fp16 if needed
1449
+ if needs_upcasting:
1450
+ self.vae.to(dtype=torch.float16)
1451
+ else:
1452
+ image = latents
1453
+ return StableDiffusionXLPipelineOutput(images=image)
1454
+
1455
+ image = self.image_processor.postprocess(image, output_type=output_type)
1456
+
1457
+ # Offload all models
1458
+ self.maybe_free_model_hooks()
1459
+
1460
+ if not return_dict:
1461
+ return (image,)
1462
+
1463
+ return StableDiffusionXLPipelineOutput(images=image)
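The `__call__` above accepts both a T2I-Adapter condition (`adapter_image`) and a ControlNet condition (`control_image`) in a single denoising pass. Below is a minimal usage sketch, not part of the uploaded file: the base checkpoint, the adapter/ControlNet checkpoints (borrowed from the example docstring of the companion inpaint pipeline that follows), the `custom_pipeline` identifier, and the placeholder image URLs are assumptions for illustration only.

```py
# Hypothetical usage sketch for the combined ControlNet + T2I-Adapter SDXL community pipeline above.
# Checkpoint IDs, the custom_pipeline identifier, and the image URLs are illustrative assumptions.
import torch
from diffusers import ControlNetModel, DiffusionPipeline, T2IAdapter
from diffusers.utils import load_image

adapter = T2IAdapter.from_pretrained(
    "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
)
controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16
)

# Extra components (adapter, controlnet) are forwarded to the custom pipeline's __init__.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    adapter=adapter,
    controlnet=controlnet,
    torch_dtype=torch.float16,
    custom_pipeline="pipeline_stable_diffusion_xl_controlnet_adapter",  # assumed identifier
).to("cuda")

# Placeholder conditioning inputs; any sketch/depth pair of matching size would do.
sketch_image = load_image("https://example.com/sketch.png")  # placeholder URL
depth_image = load_image("https://example.com/depth.png")    # placeholder URL

image = pipe(
    prompt="a house in the mountains, best quality",
    adapter_image=sketch_image,
    control_image=depth_image,
    adapter_conditioning_scale=0.8,
    controlnet_conditioning_scale=0.5,
    num_inference_steps=30,
).images[0]
image.save("controlnet_adapter_result.png")
```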
v0.24.0/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py ADDED
@@ -0,0 +1,1908 @@
1
+ # Copyright 2023 Jake Babbidge, TencentARC and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # ignore the entire file for precommit
16
+ # type: ignore
17
+
18
+ import inspect
19
+ from collections.abc import Callable
20
+ from typing import Any, List, Optional, Union
21
+
22
+ import numpy as np
23
+ import PIL
24
+ import torch
25
+ import torch.nn.functional as F
26
+ from transformers import (
27
+ CLIPTextModel,
28
+ CLIPTextModelWithProjection,
29
+ CLIPTokenizer,
30
+ )
31
+
32
+ from diffusers import DiffusionPipeline
33
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
34
+ from diffusers.loaders import (
35
+ FromSingleFileMixin,
36
+ LoraLoaderMixin,
37
+ StableDiffusionXLLoraLoaderMixin,
38
+ TextualInversionLoaderMixin,
39
+ )
40
+ from diffusers.models import (
41
+ AutoencoderKL,
42
+ ControlNetModel,
43
+ MultiAdapter,
44
+ T2IAdapter,
45
+ UNet2DConditionModel,
46
+ )
47
+ from diffusers.models.attention_processor import (
48
+ AttnProcessor2_0,
49
+ LoRAAttnProcessor2_0,
50
+ LoRAXFormersAttnProcessor,
51
+ XFormersAttnProcessor,
52
+ )
53
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
54
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
55
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
56
+ from diffusers.schedulers import KarrasDiffusionSchedulers
57
+ from diffusers.utils import (
58
+ PIL_INTERPOLATION,
59
+ USE_PEFT_BACKEND,
60
+ logging,
61
+ replace_example_docstring,
62
+ scale_lora_layers,
63
+ unscale_lora_layers,
64
+ )
65
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
66
+
67
+
68
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
69
+
70
+ EXAMPLE_DOC_STRING = """
71
+ Examples:
72
+ ```py
73
+ >>> import torch
74
+ >>> from diffusers import DiffusionPipeline, T2IAdapter
75
+ >>> from diffusers.utils import load_image
76
+ >>> from PIL import Image
77
+ >>> from controlnet_aux.midas import MidasDetector
78
+
79
+ >>> adapter = T2IAdapter.from_pretrained(
80
+ ... "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
81
+ ... ).to("cuda")
82
+
83
+ >>> controlnet = ControlNetModel.from_pretrained(
84
+ ... "diffusers/controlnet-depth-sdxl-1.0",
85
+ ... torch_dtype=torch.float16,
86
+ ... variant="fp16",
87
+ ... use_safetensors=True
88
+ ... ).to("cuda")
89
+
90
+ >>> pipe = DiffusionPipeline.from_pretrained(
91
+ ... "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
92
+ ... torch_dtype=torch.float16,
93
+ ... variant="fp16",
94
+ ... use_safetensors=True,
95
+ ... custom_pipeline="stable_diffusion_xl_adapter_controlnet_inpaint",
96
+ ... adapter=adapter,
97
+ ... controlnet=controlnet,
98
+ ... ).to("cuda")
99
+
100
+ >>> prompt = "a tiger sitting on a park bench"
101
+ >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
102
+ >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
103
+
104
+ >>> image = load_image(img_url).resize((1024, 1024))
105
+ >>> mask_image = load_image(mask_url).resize((1024, 1024))
106
+
107
+ >>> midas_depth = MidasDetector.from_pretrained(
108
+ ... "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large"
109
+ ... ).to("cuda")
110
+
111
+ >>> depth_image = midas_depth(
112
+ ... image, detect_resolution=512, image_resolution=1024
113
+ ... )
114
+
115
+ >>> strength = 0.4
116
+
117
+ >>> generator = torch.manual_seed(42)
118
+
119
+ >>> result_image = pipe(
120
+ ... image=image,
121
+ ... mask_image=mask_image,
122
+ ... adapter_image=depth_image,
123
+ ... control_image=depth_image,
124
+ ... controlnet_conditioning_scale=strength,
125
+ ... adapter_conditioning_scale=strength,
126
+ ... strength=0.7,
127
+ ... generator=generator,
128
+ ... prompt=prompt,
129
+ ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality",
130
+ ... num_inference_steps=50
131
+ ... ).images[0]
132
+ ```
133
+ """
134
+
135
+
136
+ def _preprocess_adapter_image(image, height, width):
137
+ if isinstance(image, torch.Tensor):
138
+ return image
139
+ elif isinstance(image, PIL.Image.Image):
140
+ image = [image]
141
+
142
+ if isinstance(image[0], PIL.Image.Image):
143
+ image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
144
+ image = [
145
+ i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
146
+ ] # expand [h, w] or [h, w, c] to [b, h, w, c]
147
+ image = np.concatenate(image, axis=0)
148
+ image = np.array(image).astype(np.float32) / 255.0
149
+ image = image.transpose(0, 3, 1, 2)
150
+ image = torch.from_numpy(image)
151
+ elif isinstance(image[0], torch.Tensor):
152
+ if image[0].ndim == 3:
153
+ image = torch.stack(image, dim=0)
154
+ elif image[0].ndim == 4:
155
+ image = torch.cat(image, dim=0)
156
+ else:
157
+ raise ValueError(
158
+ f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}"
159
+ )
160
+ return image
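+ # Illustrative example for _preprocess_adapter_image (above), assuming a single 512x512 RGB
+ # PIL input: the image is resized, scaled to [0, 1] and returned as a float32 tensor of shape
+ # [1, 3, 512, 512]; a grayscale (H x W) input gains a channel axis and comes back as [1, 1, H, W].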
161
+
162
+
163
+ def mask_pil_to_torch(mask, height, width):
164
+ # preprocess mask
165
+ if isinstance(mask, (PIL.Image.Image, np.ndarray)):
166
+ mask = [mask]
167
+
168
+ if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
169
+ mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
170
+ mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
171
+ mask = mask.astype(np.float32) / 255.0
172
+ elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
173
+ mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
174
+
175
+ mask = torch.from_numpy(mask)
176
+ return mask
177
+
178
+
179
+ def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
180
+ """
181
+ Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
182
+ converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
183
+ ``image`` and ``1`` for the ``mask``.
184
+
185
+ The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
186
+ binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
187
+
188
+ Args:
189
+ image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
190
+ It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
191
+ ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
192
+ mask (Union[np.array, PIL.Image, torch.Tensor]): The mask to apply to the image, i.e. regions to inpaint.
193
+ It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
194
+ ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
195
+
196
+
197
+ Raises:
198
+ ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
199
+ should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
200
+ TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
201
+ (or the other way around).
202
+
203
+ Returns:
204
+ tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
205
+ dimensions: ``batch x channels x height x width``.
206
+ """
207
+
208
+ # TODO(Yiyi) - need to clean this up later
209
+ if image is None:
210
+ raise ValueError("`image` input cannot be undefined.")
211
+
212
+ if mask is None:
213
+ raise ValueError("`mask_image` input cannot be undefined.")
214
+
215
+ if isinstance(image, torch.Tensor):
216
+ if not isinstance(mask, torch.Tensor):
217
+ mask = mask_pil_to_torch(mask, height, width)
218
+
219
+ if image.ndim == 3:
220
+ image = image.unsqueeze(0)
221
+
222
+ # Batch and add channel dim for single mask
223
+ if mask.ndim == 2:
224
+ mask = mask.unsqueeze(0).unsqueeze(0)
225
+
226
+ # Batch single mask or add channel dim
227
+ if mask.ndim == 3:
228
+ # Single batched mask, no channel dim or single mask not batched but channel dim
229
+ if mask.shape[0] == 1:
230
+ mask = mask.unsqueeze(0)
231
+
232
+ # Batched masks no channel dim
233
+ else:
234
+ mask = mask.unsqueeze(1)
235
+
236
+ assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
237
+ # assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
238
+ assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
239
+
240
+ # Check image is in [-1, 1]
241
+ # if image.min() < -1 or image.max() > 1:
242
+ # raise ValueError("Image should be in [-1, 1] range")
243
+
244
+ # Check mask is in [0, 1]
245
+ if mask.min() < 0 or mask.max() > 1:
246
+ raise ValueError("Mask should be in [0, 1] range")
247
+
248
+ # Binarize mask
249
+ mask[mask < 0.5] = 0
250
+ mask[mask >= 0.5] = 1
251
+
252
+ # Image as float32
253
+ image = image.to(dtype=torch.float32)
254
+ elif isinstance(mask, torch.Tensor):
255
+ raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
256
+ else:
257
+ # preprocess image
258
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
259
+ image = [image]
260
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
261
+ # resize all images w.r.t. passed height and width
262
+ image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
263
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
264
+ image = np.concatenate(image, axis=0)
265
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
266
+ image = np.concatenate([i[None, :] for i in image], axis=0)
267
+
268
+ image = image.transpose(0, 3, 1, 2)
269
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
270
+
271
+ mask = mask_pil_to_torch(mask, height, width)
272
+ mask[mask < 0.5] = 0
273
+ mask[mask >= 0.5] = 1
274
+
275
+ if image.shape[1] == 4:
276
+ # images are in latent space and thus can't
277
+ # be masked set masked_image to None
278
+ # we assume that the checkpoint is not an inpainting
279
+ # checkpoint. TODO(Yiyi) - need to clean this up later
280
+ masked_image = None
281
+ else:
282
+ masked_image = image * (mask < 0.5)
283
+
284
+ # n.b. ensure backwards compatibility as old function does not return image
285
+ if return_image:
286
+ return mask, masked_image, image
287
+
288
+ return mask, masked_image
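+ # Illustrative example for prepare_mask_and_masked_image: for a 1024x1024 RGB PIL image and a
+ # matching PIL mask, `mask` is returned as a binary float32 tensor of shape [1, 1, 1024, 1024]
+ # and `masked_image` as the image rescaled to [-1, 1] with the region to inpaint zeroed out,
+ # shape [1, 3, 1024, 1024].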
289
+
290
+
291
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
292
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
293
+ """
294
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
295
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
296
+ """
297
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
298
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
299
+ # rescale the results from guidance (fixes overexposure)
300
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
301
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
302
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
303
+ return noise_cfg
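+ # In short: with gamma = guidance_rescale, the returned prediction equals
+ # gamma * noise_cfg * std(noise_pred_text) / std(noise_cfg) + (1 - gamma) * noise_cfg,
+ # i.e. the CFG output rescaled towards the standard deviation of the text-conditioned branch.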
304
+
305
+
306
+ class StableDiffusionXLControlNetAdapterInpaintPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
307
+ r"""
308
+ Pipeline for text-guided image inpainting using Stable Diffusion XL with ControlNet guidance and T2I-Adapter
309
+ https://arxiv.org/abs/2302.08453
310
+
311
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
312
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
313
+
314
+ Args:
315
+ adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`):
316
+ Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a
317
+ list, the outputs from each Adapter are added together to create one combined additional conditioning.
318
+ adapter_weights (`List[float]`, *optional*, defaults to None):
319
+ List of floats representing the weight which will be multiply to each adapter's output before adding them
320
+ together.
321
+ vae ([`AutoencoderKL`]):
322
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
323
+ text_encoder ([`CLIPTextModel`]):
324
+ Frozen text-encoder. Stable Diffusion uses the text portion of
325
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
326
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
327
+ tokenizer (`CLIPTokenizer`):
328
+ Tokenizer of class
329
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
330
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
331
+ scheduler ([`SchedulerMixin`]):
332
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
333
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
334
+ safety_checker ([`StableDiffusionSafetyChecker`]):
335
+ Classification module that estimates whether generated images could be considered offensive or harmful.
336
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
337
+ feature_extractor ([`CLIPFeatureExtractor`]):
338
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
339
+ requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
340
+ Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config
341
+ of `stabilityai/stable-diffusion-xl-refiner-1-0`.
342
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
343
+ Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
344
+ `stabilityai/stable-diffusion-xl-base-1-0`.
345
+ """
346
+
347
+ def __init__(
348
+ self,
349
+ vae: AutoencoderKL,
350
+ text_encoder: CLIPTextModel,
351
+ text_encoder_2: CLIPTextModelWithProjection,
352
+ tokenizer: CLIPTokenizer,
353
+ tokenizer_2: CLIPTokenizer,
354
+ unet: UNet2DConditionModel,
355
+ adapter: Union[T2IAdapter, MultiAdapter],
356
+ controlnet: Union[ControlNetModel, MultiControlNetModel],
357
+ scheduler: KarrasDiffusionSchedulers,
358
+ requires_aesthetics_score: bool = False,
359
+ force_zeros_for_empty_prompt: bool = True,
360
+ ):
361
+ super().__init__()
362
+
363
+ if isinstance(controlnet, (list, tuple)):
364
+ controlnet = MultiControlNetModel(controlnet)
365
+
366
+ self.register_modules(
367
+ vae=vae,
368
+ text_encoder=text_encoder,
369
+ text_encoder_2=text_encoder_2,
370
+ tokenizer=tokenizer,
371
+ tokenizer_2=tokenizer_2,
372
+ unet=unet,
373
+ adapter=adapter,
374
+ controlnet=controlnet,
375
+ scheduler=scheduler,
376
+ )
377
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
378
+ self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
379
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
380
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
381
+ self.control_image_processor = VaeImageProcessor(
382
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
383
+ )
384
+ self.default_sample_size = self.unet.config.sample_size
385
+
386
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
387
+ def enable_vae_slicing(self):
388
+ r"""
389
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
390
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
391
+ """
392
+ self.vae.enable_slicing()
393
+
394
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
395
+ def disable_vae_slicing(self):
396
+ r"""
397
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
398
+ computing decoding in one step.
399
+ """
400
+ self.vae.disable_slicing()
401
+
402
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
403
+ def enable_vae_tiling(self):
404
+ r"""
405
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
406
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
407
+ processing larger images.
408
+ """
409
+ self.vae.enable_tiling()
410
+
411
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
412
+ def disable_vae_tiling(self):
413
+ r"""
414
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
415
+ computing decoding in one step.
416
+ """
417
+ self.vae.disable_tiling()
418
+
419
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
420
+ def encode_prompt(
421
+ self,
422
+ prompt: str,
423
+ prompt_2: Optional[str] = None,
424
+ device: Optional[torch.device] = None,
425
+ num_images_per_prompt: int = 1,
426
+ do_classifier_free_guidance: bool = True,
427
+ negative_prompt: Optional[str] = None,
428
+ negative_prompt_2: Optional[str] = None,
429
+ prompt_embeds: Optional[torch.FloatTensor] = None,
430
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
431
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
432
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
433
+ lora_scale: Optional[float] = None,
434
+ clip_skip: Optional[int] = None,
435
+ ):
436
+ r"""
437
+ Encodes the prompt into text encoder hidden states.
438
+
439
+ Args:
440
+ prompt (`str` or `List[str]`, *optional*):
441
+ prompt to be encoded
442
+ prompt_2 (`str` or `List[str]`, *optional*):
443
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
444
+ used in both text-encoders
445
+ device: (`torch.device`):
446
+ torch device
447
+ num_images_per_prompt (`int`):
448
+ number of images that should be generated per prompt
449
+ do_classifier_free_guidance (`bool`):
450
+ whether to use classifier free guidance or not
451
+ negative_prompt (`str` or `List[str]`, *optional*):
452
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
453
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
454
+ less than `1`).
455
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
456
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
457
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
458
+ prompt_embeds (`torch.FloatTensor`, *optional*):
459
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
460
+ provided, text embeddings will be generated from `prompt` input argument.
461
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
462
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
463
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
464
+ argument.
465
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
466
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
467
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
468
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
469
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
470
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
471
+ input argument.
472
+ lora_scale (`float`, *optional*):
473
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
474
+ clip_skip (`int`, *optional*):
475
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
476
+ the output of the pre-final layer will be used for computing the prompt embeddings.
477
+ """
478
+ device = device or self._execution_device
479
+
480
+ # set lora scale so that monkey patched LoRA
481
+ # function of text encoder can correctly access it
482
+ if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
483
+ self._lora_scale = lora_scale
484
+
485
+ # dynamically adjust the LoRA scale
486
+ if self.text_encoder is not None:
487
+ if not USE_PEFT_BACKEND:
488
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
489
+ else:
490
+ scale_lora_layers(self.text_encoder, lora_scale)
491
+
492
+ if self.text_encoder_2 is not None:
493
+ if not USE_PEFT_BACKEND:
494
+ adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
495
+ else:
496
+ scale_lora_layers(self.text_encoder_2, lora_scale)
497
+
498
+ prompt = [prompt] if isinstance(prompt, str) else prompt
499
+
500
+ if prompt is not None:
501
+ batch_size = len(prompt)
502
+ else:
503
+ batch_size = prompt_embeds.shape[0]
504
+
505
+ # Define tokenizers and text encoders
506
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
507
+ text_encoders = (
508
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
509
+ )
510
+
511
+ if prompt_embeds is None:
512
+ prompt_2 = prompt_2 or prompt
513
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
514
+
515
+ # textual inversion: process multi-vector tokens if necessary
516
+ prompt_embeds_list = []
517
+ prompts = [prompt, prompt_2]
518
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
519
+ if isinstance(self, TextualInversionLoaderMixin):
520
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
521
+
522
+ text_inputs = tokenizer(
523
+ prompt,
524
+ padding="max_length",
525
+ max_length=tokenizer.model_max_length,
526
+ truncation=True,
527
+ return_tensors="pt",
528
+ )
529
+
530
+ text_input_ids = text_inputs.input_ids
531
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
532
+
533
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
534
+ text_input_ids, untruncated_ids
535
+ ):
536
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
537
+ logger.warning(
538
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
539
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
540
+ )
541
+
542
+ prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
543
+
544
+ # We are only ALWAYS interested in the pooled output of the final text encoder
545
+ pooled_prompt_embeds = prompt_embeds[0]
546
+ if clip_skip is None:
547
+ prompt_embeds = prompt_embeds.hidden_states[-2]
548
+ else:
549
+ # "2" because SDXL always indexes from the penultimate layer.
550
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
551
+
552
+ prompt_embeds_list.append(prompt_embeds)
553
+
554
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
555
+
556
+ # get unconditional embeddings for classifier free guidance
557
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
558
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
559
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
560
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
561
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
562
+ negative_prompt = negative_prompt or ""
563
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
564
+
565
+ # normalize str to list
566
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
567
+ negative_prompt_2 = (
568
+ batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
569
+ )
570
+
571
+ uncond_tokens: List[str]
572
+ if prompt is not None and type(prompt) is not type(negative_prompt):
573
+ raise TypeError(
574
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
575
+ f" {type(prompt)}."
576
+ )
577
+ elif batch_size != len(negative_prompt):
578
+ raise ValueError(
579
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
580
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
581
+ " the batch size of `prompt`."
582
+ )
583
+ else:
584
+ uncond_tokens = [negative_prompt, negative_prompt_2]
585
+
586
+ negative_prompt_embeds_list = []
587
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
588
+ if isinstance(self, TextualInversionLoaderMixin):
589
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
590
+
591
+ max_length = prompt_embeds.shape[1]
592
+ uncond_input = tokenizer(
593
+ negative_prompt,
594
+ padding="max_length",
595
+ max_length=max_length,
596
+ truncation=True,
597
+ return_tensors="pt",
598
+ )
599
+
600
+ negative_prompt_embeds = text_encoder(
601
+ uncond_input.input_ids.to(device),
602
+ output_hidden_states=True,
603
+ )
604
+ # We are only ALWAYS interested in the pooled output of the final text encoder
605
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
606
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
607
+
608
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
609
+
610
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
611
+
612
+ if self.text_encoder_2 is not None:
613
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
614
+ else:
615
+ prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
616
+
617
+ bs_embed, seq_len, _ = prompt_embeds.shape
618
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
619
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
620
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
621
+
622
+ if do_classifier_free_guidance:
623
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
624
+ seq_len = negative_prompt_embeds.shape[1]
625
+
626
+ if self.text_encoder_2 is not None:
627
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
628
+ else:
629
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
630
+
631
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
632
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
633
+
634
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
635
+ bs_embed * num_images_per_prompt, -1
636
+ )
637
+ if do_classifier_free_guidance:
638
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
639
+ bs_embed * num_images_per_prompt, -1
640
+ )
641
+
642
+ if self.text_encoder is not None:
643
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
644
+ # Retrieve the original scale by scaling back the LoRA layers
645
+ unscale_lora_layers(self.text_encoder, lora_scale)
646
+
647
+ if self.text_encoder_2 is not None:
648
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
649
+ # Retrieve the original scale by scaling back the LoRA layers
650
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
651
+
652
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
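+ # Illustrative shapes for the standard SDXL checkpoints (an assumption, not enforced here):
+ # with a single prompt and num_images_per_prompt=1, prompt_embeds is [1, 77, 2048] (the two
+ # text encoders concatenated along the feature axis) and pooled_prompt_embeds is [1, 1280]
+ # from the second text encoder.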
653
+
654
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
655
+ def prepare_extra_step_kwargs(self, generator, eta):
656
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
657
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
658
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
659
+ # and should be between [0, 1]
660
+
661
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
662
+ extra_step_kwargs = {}
663
+ if accepts_eta:
664
+ extra_step_kwargs["eta"] = eta
665
+
666
+ # check if the scheduler accepts generator
667
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
668
+ if accepts_generator:
669
+ extra_step_kwargs["generator"] = generator
670
+ return extra_step_kwargs
671
+
672
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
673
+ def check_image(self, image, prompt, prompt_embeds):
674
+ image_is_pil = isinstance(image, PIL.Image.Image)
675
+ image_is_tensor = isinstance(image, torch.Tensor)
676
+ image_is_np = isinstance(image, np.ndarray)
677
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
678
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
679
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
680
+
681
+ if (
682
+ not image_is_pil
683
+ and not image_is_tensor
684
+ and not image_is_np
685
+ and not image_is_pil_list
686
+ and not image_is_tensor_list
687
+ and not image_is_np_list
688
+ ):
689
+ raise TypeError(
690
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
691
+ )
692
+
693
+ if image_is_pil:
694
+ image_batch_size = 1
695
+ else:
696
+ image_batch_size = len(image)
697
+
698
+ if prompt is not None and isinstance(prompt, str):
699
+ prompt_batch_size = 1
700
+ elif prompt is not None and isinstance(prompt, list):
701
+ prompt_batch_size = len(prompt)
702
+ elif prompt_embeds is not None:
703
+ prompt_batch_size = prompt_embeds.shape[0]
704
+
705
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
706
+ raise ValueError(
707
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
708
+ )
709
+
710
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs
711
+ def check_inputs(
712
+ self,
713
+ prompt,
714
+ prompt_2,
715
+ height,
716
+ width,
717
+ callback_steps,
718
+ negative_prompt=None,
719
+ negative_prompt_2=None,
720
+ prompt_embeds=None,
721
+ negative_prompt_embeds=None,
722
+ pooled_prompt_embeds=None,
723
+ negative_pooled_prompt_embeds=None,
724
+ callback_on_step_end_tensor_inputs=None,
725
+ ):
726
+ if height % 8 != 0 or width % 8 != 0:
727
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
728
+
729
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
730
+ raise ValueError(
731
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
732
+ f" {type(callback_steps)}."
733
+ )
734
+
735
+ if callback_on_step_end_tensor_inputs is not None and not all(
736
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
737
+ ):
738
+ raise ValueError(
739
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
740
+ )
741
+
742
+ if prompt is not None and prompt_embeds is not None:
743
+ raise ValueError(
744
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
745
+ " only forward one of the two."
746
+ )
747
+ elif prompt_2 is not None and prompt_embeds is not None:
748
+ raise ValueError(
749
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
750
+ " only forward one of the two."
751
+ )
752
+ elif prompt is None and prompt_embeds is None:
753
+ raise ValueError(
754
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
755
+ )
756
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
757
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
758
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
759
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
760
+
761
+ if negative_prompt is not None and negative_prompt_embeds is not None:
762
+ raise ValueError(
763
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
764
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
765
+ )
766
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
767
+ raise ValueError(
768
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
769
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
770
+ )
771
+
772
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
773
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
774
+ raise ValueError(
775
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
776
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
777
+ f" {negative_prompt_embeds.shape}."
778
+ )
779
+
780
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
781
+ raise ValueError(
782
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
783
+ )
784
+
785
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
786
+ raise ValueError(
787
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
788
+ )
789
+
790
+ def check_conditions(
791
+ self,
792
+ prompt,
793
+ prompt_embeds,
794
+ adapter_image,
795
+ control_image,
796
+ adapter_conditioning_scale,
797
+ controlnet_conditioning_scale,
798
+ control_guidance_start,
799
+ control_guidance_end,
800
+ ):
801
+ # controlnet checks
802
+ if not isinstance(control_guidance_start, (tuple, list)):
803
+ control_guidance_start = [control_guidance_start]
804
+
805
+ if not isinstance(control_guidance_end, (tuple, list)):
806
+ control_guidance_end = [control_guidance_end]
807
+
808
+ if len(control_guidance_start) != len(control_guidance_end):
809
+ raise ValueError(
810
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
811
+ )
812
+
813
+ if isinstance(self.controlnet, MultiControlNetModel):
814
+ if len(control_guidance_start) != len(self.controlnet.nets):
815
+ raise ValueError(
816
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
817
+ )
818
+
819
+ for start, end in zip(control_guidance_start, control_guidance_end):
820
+ if start >= end:
821
+ raise ValueError(
822
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
823
+ )
824
+ if start < 0.0:
825
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
826
+ if end > 1.0:
827
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
828
+
829
+ # Check controlnet `image`
830
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
831
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
832
+ )
833
+ if (
834
+ isinstance(self.controlnet, ControlNetModel)
835
+ or is_compiled
836
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
837
+ ):
838
+ self.check_image(control_image, prompt, prompt_embeds)
839
+ elif (
840
+ isinstance(self.controlnet, MultiControlNetModel)
841
+ or is_compiled
842
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
843
+ ):
844
+ if not isinstance(control_image, list):
845
+ raise TypeError("For multiple controlnets: `control_image` must be type `list`")
846
+
847
+ # When `image` is a nested list:
848
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
849
+ elif any(isinstance(i, list) for i in control_image):
850
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
851
+ elif len(control_image) != len(self.controlnet.nets):
852
+ raise ValueError(
853
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(control_image)} images and {len(self.controlnet.nets)} ControlNets."
854
+ )
855
+
856
+ for image_ in control_image:
857
+ self.check_image(image_, prompt, prompt_embeds)
858
+ else:
859
+ assert False
860
+
861
+ # Check `controlnet_conditioning_scale`
862
+ if (
863
+ isinstance(self.controlnet, ControlNetModel)
864
+ or is_compiled
865
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
866
+ ):
867
+ if not isinstance(controlnet_conditioning_scale, float):
868
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
869
+ elif (
870
+ isinstance(self.controlnet, MultiControlNetModel)
871
+ or is_compiled
872
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
873
+ ):
874
+ if isinstance(controlnet_conditioning_scale, list):
875
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
876
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
877
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
878
+ self.controlnet.nets
879
+ ):
880
+ raise ValueError(
881
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
882
+ " the same length as the number of controlnets"
883
+ )
884
+ else:
885
+ assert False
886
+
887
+ # adapter checks
888
+ if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
889
+ self.check_image(adapter_image, prompt, prompt_embeds)
890
+ elif (
891
+ isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
892
+ ):
893
+ if not isinstance(adapter_image, list):
894
+ raise TypeError("For multiple adapters: `adapter_image` must be type `list`")
895
+
896
+ # When `image` is a nested list:
897
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
898
+ elif any(isinstance(i, list) for i in adapter_image):
899
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
900
+ elif len(adapter_image) != len(self.adapter.adapters):
901
+ raise ValueError(
902
+ f"For multiple adapters: `image` must have the same length as the number of adapters, but got {len(adapter_image)} images and {len(self.adapters.nets)} Adapters."
903
+ )
904
+
905
+ for image_ in adapter_image:
906
+ self.check_image(image_, prompt, prompt_embeds)
907
+ else:
908
+ assert False
909
+
910
+ # Check `adapter_conditioning_scale`
911
+ if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
912
+ if not isinstance(adapter_conditioning_scale, float):
913
+ raise TypeError("For single adapter: `adapter_conditioning_scale` must be type `float`.")
914
+ elif (
915
+ isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
916
+ ):
917
+ if isinstance(adapter_conditioning_scale, list):
918
+ if any(isinstance(i, list) for i in adapter_conditioning_scale):
919
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
920
+ elif isinstance(adapter_conditioning_scale, list) and len(adapter_conditioning_scale) != len(
921
+ self.adapter.adapters
922
+ ):
923
+ raise ValueError(
924
+ "For multiple adapters: When `adapter_conditioning_scale` is specified as `list`, it must have"
925
+ " the same length as the number of adapters"
926
+ )
927
+ else:
928
+ assert False
929
+
930
+ def prepare_latents(
931
+ self,
932
+ batch_size,
933
+ num_channels_latents,
934
+ height,
935
+ width,
936
+ dtype,
937
+ device,
938
+ generator,
939
+ latents=None,
940
+ image=None,
941
+ timestep=None,
942
+ is_strength_max=True,
943
+ add_noise=True,
944
+ return_noise=False,
945
+ return_image_latents=False,
946
+ ):
947
+ shape = (
948
+ batch_size,
949
+ num_channels_latents,
950
+ height // self.vae_scale_factor,
951
+ width // self.vae_scale_factor,
952
+ )
953
+ if isinstance(generator, list) and len(generator) != batch_size:
954
+ raise ValueError(
955
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
956
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
957
+ )
958
+
959
+ if (image is None or timestep is None) and not is_strength_max:
960
+ raise ValueError(
961
+ "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
962
+ "However, either the image or the noise timestep has not been provided."
963
+ )
964
+
965
+ if image.shape[1] == 4:
966
+ image_latents = image.to(device=device, dtype=dtype)
967
+ elif return_image_latents or (latents is None and not is_strength_max):
968
+ image = image.to(device=device, dtype=dtype)
969
+ image_latents = self._encode_vae_image(image=image, generator=generator)
970
+
971
+ image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
972
+
973
+ if latents is None and add_noise:
974
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
975
+ # if strength is 1. then initialise the latents to noise, else initial to image + noise
976
+ latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
977
+ # if pure noise then scale the initial latents by the Scheduler's init sigma
978
+ latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
979
+ elif add_noise:
980
+ noise = latents.to(device)
981
+ latents = noise * self.scheduler.init_noise_sigma
982
+ else:
983
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
984
+ latents = image_latents.to(device)
985
+
986
+ outputs = (latents,)
987
+
988
+ if return_noise:
989
+ outputs += (noise,)
990
+
991
+ if return_image_latents:
992
+ outputs += (image_latents,)
993
+
994
+ return outputs
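+ # Summary of the branches above when no latents are passed in: strength == 1 starts from pure
+ # noise scaled by the scheduler's init_noise_sigma, strength < 1 starts from the encoded image
+ # with noise added at `timestep`, and add_noise=False returns the encoded image latents as-is.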
995
+
996
+ def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
997
+ dtype = image.dtype
998
+ if self.vae.config.force_upcast:
999
+ image = image.float()
1000
+ self.vae.to(dtype=torch.float32)
1001
+
1002
+ if isinstance(generator, list):
1003
+ image_latents = [
1004
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
1005
+ for i in range(image.shape[0])
1006
+ ]
1007
+ image_latents = torch.cat(image_latents, dim=0)
1008
+ else:
1009
+ image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
1010
+
1011
+ if self.vae.config.force_upcast:
1012
+ self.vae.to(dtype)
1013
+
1014
+ image_latents = image_latents.to(dtype)
1015
+ image_latents = self.vae.config.scaling_factor * image_latents
1016
+
1017
+ return image_latents
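+ # Note: when `force_upcast` is set the VAE temporarily runs in float32, and the returned
+ # latents are multiplied by vae.config.scaling_factor (e.g. 0.13025 for the SDXL VAE) so they
+ # match the scale the UNet was trained on.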
1018
+
1019
+ def prepare_mask_latents(
1020
+ self,
1021
+ mask,
1022
+ masked_image,
1023
+ batch_size,
1024
+ height,
1025
+ width,
1026
+ dtype,
1027
+ device,
1028
+ generator,
1029
+ do_classifier_free_guidance,
1030
+ ):
1031
+ # resize the mask to latents shape as we concatenate the mask to the latents
1032
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
1033
+ # and half precision
1034
+ mask = torch.nn.functional.interpolate(
1035
+ mask,
1036
+ size=(
1037
+ height // self.vae_scale_factor,
1038
+ width // self.vae_scale_factor,
1039
+ ),
1040
+ )
1041
+ mask = mask.to(device=device, dtype=dtype)
1042
+
1043
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
1044
+ if mask.shape[0] < batch_size:
1045
+ if not batch_size % mask.shape[0] == 0:
1046
+ raise ValueError(
1047
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
1048
+ f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
1049
+ " of masks that you pass is divisible by the total requested batch size."
1050
+ )
1051
+ mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
1052
+
1053
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
1054
+
1055
+ masked_image_latents = None
1056
+ if masked_image is not None:
1057
+ masked_image = masked_image.to(device=device, dtype=dtype)
1058
+ masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
1059
+ if masked_image_latents.shape[0] < batch_size:
1060
+ if not batch_size % masked_image_latents.shape[0] == 0:
1061
+ raise ValueError(
1062
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
1063
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
1064
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
1065
+ )
1066
+ masked_image_latents = masked_image_latents.repeat(
1067
+ batch_size // masked_image_latents.shape[0], 1, 1, 1
1068
+ )
1069
+
1070
+ masked_image_latents = (
1071
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
1072
+ )
1073
+
1074
+ # aligning device to prevent device errors when concatenating it with the latent model input
1075
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
1076
+
1077
+ return mask, masked_image_latents
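+ # Illustrative shapes, assuming the usual SDXL VAE scale factor of 8: for a 1024x1024 input
+ # with batch_size=1 and classifier-free guidance, `mask` comes back as [2, 1, 128, 128]
+ # (downsampled to latent resolution and duplicated for the unconditional branch) and
+ # `masked_image_latents`, when present, as [2, 4, 128, 128].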
1078
+
1079
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
1080
+ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
1081
+ # get the original timestep using init_timestep
1082
+ if denoising_start is None:
1083
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
1084
+ t_start = max(num_inference_steps - init_timestep, 0)
1085
+ else:
1086
+ t_start = 0
1087
+
1088
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
1089
+
1090
+ # Strength is irrelevant if we directly request a timestep to start at;
1091
+ # that is, strength is determined by the denoising_start instead.
1092
+ if denoising_start is not None:
1093
+ discrete_timestep_cutoff = int(
1094
+ round(
1095
+ self.scheduler.config.num_train_timesteps
1096
+ - (denoising_start * self.scheduler.config.num_train_timesteps)
1097
+ )
1098
+ )
1099
+
1100
+ num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
1101
+ if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
1102
+ # if the scheduler is a 2nd order scheduler we might have to do +1
1103
+ # because `num_inference_steps` might be even given that every timestep
1104
+ # (except the highest one) is duplicated. If `num_inference_steps` is even it would
1105
+ # mean that we cut the timesteps in the middle of the denoising step
1106
+ # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
1107
+ # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
1108
+ num_inference_steps = num_inference_steps + 1
1109
+
1110
+ # because t_n+1 >= t_n, we slice the timesteps starting from the end
1111
+ timesteps = timesteps[-num_inference_steps:]
1112
+ return timesteps, num_inference_steps
1113
+
1114
+ return timesteps, num_inference_steps - t_start
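+ # Illustrative example: with num_inference_steps=50, strength=0.5 and denoising_start=None,
+ # init_timestep = 25 and t_start = 25, so the last 25 scheduler timesteps are returned and
+ # 25 denoising steps are actually run.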
1115
+
1116
+ def _get_add_time_ids(
1117
+ self,
1118
+ original_size,
1119
+ crops_coords_top_left,
1120
+ target_size,
1121
+ aesthetic_score,
1122
+ negative_aesthetic_score,
1123
+ dtype,
1124
+ text_encoder_projection_dim=None,
1125
+ ):
1126
+ if self.config.requires_aesthetics_score:
1127
+ add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
1128
+ add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,))
1129
+ else:
1130
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
1131
+ add_neg_time_ids = list(original_size + crops_coords_top_left + target_size)
1132
+
1133
+ passed_add_embed_dim = (
1134
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
1135
+ )
1136
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
1137
+
1138
+ if (
1139
+ expected_add_embed_dim > passed_add_embed_dim
1140
+ and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
1141
+ ):
1142
+ raise ValueError(
1143
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
1144
+ )
1145
+ elif (
1146
+ expected_add_embed_dim < passed_add_embed_dim
1147
+ and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
1148
+ ):
1149
+ raise ValueError(
1150
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
1151
+ )
1152
+ elif expected_add_embed_dim != passed_add_embed_dim:
1153
+ raise ValueError(
1154
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
1155
+ )
1156
+
1157
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
1158
+ add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
1159
+
1160
+ return add_time_ids, add_neg_time_ids
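+ # Illustrative example: with original_size=(1024, 1024), crops_coords_top_left=(0, 0),
+ # target_size=(1024, 1024) and requires_aesthetics_score=False, add_time_ids becomes
+ # tensor([[1024., 1024., 0., 0., 1024., 1024.]]); with requires_aesthetics_score=True the
+ # target size is replaced by the aesthetic score (negative score for the negative branch).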
1161
+
1162
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
1163
+ def upcast_vae(self):
1164
+ dtype = self.vae.dtype
1165
+ self.vae.to(dtype=torch.float32)
1166
+ use_torch_2_0_or_xformers = isinstance(
1167
+ self.vae.decoder.mid_block.attentions[0].processor,
1168
+ (
1169
+ AttnProcessor2_0,
1170
+ XFormersAttnProcessor,
1171
+ LoRAXFormersAttnProcessor,
1172
+ LoRAAttnProcessor2_0,
1173
+ ),
1174
+ )
1175
+ # if xformers or torch_2_0 is used attention block does not need
1176
+ # to be in float32 which can save lots of memory
1177
+ if use_torch_2_0_or_xformers:
1178
+ self.vae.post_quant_conv.to(dtype)
1179
+ self.vae.decoder.conv_in.to(dtype)
1180
+ self.vae.decoder.mid_block.to(dtype)
1181
+
1182
+ # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
1183
+ def _default_height_width(self, height, width, image):
1184
+ # NOTE: It is possible that a list of images have different
1185
+ # dimensions for each image, so just checking the first image
1186
+ # is not _exactly_ correct, but it is simple.
1187
+ while isinstance(image, list):
1188
+ image = image[0]
1189
+
1190
+ if height is None:
1191
+ if isinstance(image, PIL.Image.Image):
1192
+ height = image.height
1193
+ elif isinstance(image, torch.Tensor):
1194
+ height = image.shape[-2]
1195
+
1196
+ # round down to nearest multiple of `self.adapter.downscale_factor`
1197
+ height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor
1198
+
1199
+ if width is None:
1200
+ if isinstance(image, PIL.Image.Image):
1201
+ width = image.width
1202
+ elif isinstance(image, torch.Tensor):
1203
+ width = image.shape[-1]
1204
+
1205
+ # round down to nearest multiple of `self.adapter.downscale_factor`
1206
+ width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor
1207
+
1208
+ return height, width
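+ # Illustrative example, assuming adapter.downscale_factor == 16: an input image of height 1000
+ # and width 768 with height=None and width=None yields height=992, width=768, i.e. both sides
+ # rounded down to a multiple of the adapter's downscale factor.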
1209
+
1210
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
1211
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
1212
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
1213
+
1214
+ The suffixes after the scaling factors represent the stages where they are being applied.
1215
+
1216
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
1217
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
1218
+
1219
+ Args:
1220
+ s1 (`float`):
1221
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
1222
+ mitigate "oversmoothing effect" in the enhanced denoising process.
1223
+ s2 (`float`):
1224
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
1225
+ mitigate "oversmoothing effect" in the enhanced denoising process.
1226
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
1227
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
1228
+ """
1229
+ if not hasattr(self, "unet"):
1230
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
1231
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
1232
+
1233
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
1234
+ def disable_freeu(self):
1235
+ """Disables the FreeU mechanism if enabled."""
1236
+ self.unet.disable_freeu()
1237
+
1238
+ def prepare_control_image(
1239
+ self,
1240
+ image,
1241
+ width,
1242
+ height,
1243
+ batch_size,
1244
+ num_images_per_prompt,
1245
+ device,
1246
+ dtype,
1247
+ do_classifier_free_guidance=False,
1248
+ guess_mode=False,
1249
+ ):
1250
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
1251
+ image_batch_size = image.shape[0]
1252
+
1253
+ if image_batch_size == 1:
1254
+ repeat_by = batch_size
1255
+ else:
1256
+ # image batch size is the same as prompt batch size
1257
+ repeat_by = num_images_per_prompt
1258
+
1259
+ image = image.repeat_interleave(repeat_by, dim=0)
1260
+
1261
+ image = image.to(device=device, dtype=dtype)
1262
+
1263
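+ # in guess mode the ControlNet only sees the conditional batch, so the control image is not duplicated for classifier-free guidance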
+ if do_classifier_free_guidance and not guess_mode:
1264
+ image = torch.cat([image] * 2)
1265
+
1266
+ return image
1267
+
1268
+ @torch.no_grad()
1269
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
1270
+ def __call__(
1271
+ self,
1272
+ prompt: Optional[Union[str, list[str]]] = None,
1273
+ prompt_2: Optional[Union[str, list[str]]] = None,
1274
+ image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
1275
+ mask_image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
1276
+ adapter_image: PipelineImageInput = None,
1277
+ control_image: PipelineImageInput = None,
1278
+ height: Optional[int] = None,
1279
+ width: Optional[int] = None,
1280
+ strength: float = 0.9999,
1281
+ num_inference_steps: int = 50,
1282
+ denoising_start: Optional[float] = None,
1283
+ denoising_end: Optional[float] = None,
1284
+ guidance_scale: float = 5.0,
1285
+ negative_prompt: Optional[Union[str, list[str]]] = None,
1286
+ negative_prompt_2: Optional[Union[str, list[str]]] = None,
1287
+ num_images_per_prompt: Optional[int] = 1,
1288
+ eta: float = 0.0,
1289
+ generator: Optional[Union[torch.Generator, list[torch.Generator]]] = None,
1290
+ latents: Optional[torch.FloatTensor] = None,
1291
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1292
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1293
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
1294
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
1295
+ output_type: Optional[str] = "pil",
1296
+ return_dict: bool = True,
1297
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1298
+ callback_steps: int = 1,
1299
+ cross_attention_kwargs: Optional[dict[str, Any]] = None,
1300
+ guidance_rescale: float = 0.0,
1301
+ original_size: Optional[tuple[int, int]] = None,
1302
+ crops_coords_top_left: Optional[tuple[int, int]] = (0, 0),
1303
+ target_size: Optional[tuple[int, int]] = None,
1304
+ adapter_conditioning_scale: Optional[Union[float, list[float]]] = 1.0,
1305
+ cond_tau: float = 1.0,
1306
+ aesthetic_score: float = 6.0,
1307
+ negative_aesthetic_score: float = 2.5,
1308
+ controlnet_conditioning_scale=1.0,
1309
+ guess_mode: bool = False,
1310
+ control_guidance_start=0.0,
1311
+ control_guidance_end=1.0,
1312
+ ):
1313
+ r"""
1314
+ Function invoked when calling the pipeline for generation.
1315
+
1316
+ Args:
1317
+ prompt (`str` or `List[str]`, *optional*):
1318
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
1319
+ instead.
1320
+ prompt_2 (`str` or `List[str]`, *optional*):
1321
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
1322
+ used in both text-encoders
1323
+ image (`PIL.Image.Image`):
1324
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
1325
+ be masked out with `mask_image` and repainted according to `prompt`.
1326
+ mask_image (`PIL.Image.Image`):
1327
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1328
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
1329
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
1330
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
1331
+ adapter_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`):
1332
+ The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the
1333
+ type is specified as `torch.FloatTensor`, it is passed to Adapter as is. `PIL.Image.Image` can also be
1334
+ accepted as an image. The control image is automatically resized to fit the output image.
1335
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
1336
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
1337
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
1338
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
1339
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
1340
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
1341
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
1342
+ input to a single ControlNet.
1343
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1344
+ The height in pixels of the generated image.
1345
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1346
+ The width in pixels of the generated image.
1347
+ strength (`float`, *optional*, defaults to 0.9999):
1348
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
1349
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
1350
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
1351
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
1352
+ essentially ignores `image`.
1353
+ num_inference_steps (`int`, *optional*, defaults to 50):
1354
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1355
+ expense of slower inference.
1356
+ denoising_start (`float`, *optional*):
1357
+ When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
1358
+ bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
1359
+ it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
1360
+ strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
1361
+ is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
1362
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1363
+ denoising_end (`float`, *optional*):
1364
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
1365
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
1366
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
1367
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
1368
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1369
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
1370
+ guidance_scale (`float`, *optional*, defaults to 5.0):
1371
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1372
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1373
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1374
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1375
+ usually at the expense of lower image quality.
1376
+ negative_prompt (`str` or `List[str]`, *optional*):
1377
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1378
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1379
+ less than `1`).
1380
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
1381
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
1382
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
1383
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1384
+ The number of images to generate per prompt.
1385
+ eta (`float`, *optional*, defaults to 0.0):
1386
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1387
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1388
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1389
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1390
+ to make generation deterministic.
1391
+ latents (`torch.FloatTensor`, *optional*):
1392
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1393
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1394
+ tensor will be generated by sampling using the supplied random `generator`.
1395
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1396
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1397
+ provided, text embeddings will be generated from `prompt` input argument.
1398
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1399
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1400
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1401
+ argument.
1402
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1403
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1404
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1405
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1406
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1407
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1408
+ input argument.
1409
+ output_type (`str`, *optional*, defaults to `"pil"`):
1410
+ The output format of the generated image. Choose between
1411
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1412
+ return_dict (`bool`, *optional*, defaults to `True`):
1413
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`]
1414
+ instead of a plain tuple.
1415
+ callback (`Callable`, *optional*):
1416
+ A function that will be called every `callback_steps` steps during inference. The function will be
1417
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1418
+ callback_steps (`int`, *optional*, defaults to 1):
1419
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1420
+ called at every step.
1421
+ cross_attention_kwargs (`dict`, *optional*):
1422
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1423
+ `self.processor` in
1424
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1425
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
1426
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
1427
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
1428
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
1429
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
1430
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1431
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1432
+ `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
1433
+ explained in section 2.2 of
1434
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1435
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1436
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1437
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1438
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1439
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1440
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1441
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1442
+ not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
1443
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1444
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1445
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the
1446
+ residual in the original unet. If multiple ControlNets are specified in init, you can set the
1447
+ corresponding scale as a list.
1448
+ adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1449
+ The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the
1450
+ residual in the original unet. If multiple adapters are specified in init, you can set the
1451
+ corresponding scale as a list.
1452
+ aesthetic_score (`float`, *optional*, defaults to 6.0):
1453
+ Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
1454
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1455
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1456
+ negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
1457
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1458
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
1459
+ simulate an aesthetic score of the generated image by influencing the negative text condition.
1460
+ Examples:
1461
+
1462
+ Returns:
1463
+ [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`:
1464
+ [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a
1465
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
1466
+ """
1467
+ # 0. Default height and width to unet
1468
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1469
+ adapter = self.adapter._orig_mod if is_compiled_module(self.adapter) else self.adapter
1470
+ height, width = self._default_height_width(height, width, adapter_image)
1471
+ device = self._execution_device
1472
+
1473
+ if isinstance(adapter, MultiAdapter):
1474
+ adapter_input = []
1475
+ for one_image in adapter_image:
1476
+ one_image = _preprocess_adapter_image(one_image, height, width)
1477
+ one_image = one_image.to(device=device, dtype=adapter.dtype)
1478
+ adapter_input.append(one_image)
1479
+ else:
1480
+ adapter_input = _preprocess_adapter_image(adapter_image, height, width)
1481
+ adapter_input = adapter_input.to(device=device, dtype=adapter.dtype)
1482
+
1483
+ original_size = original_size or (height, width)
1484
+ target_size = target_size or (height, width)
1485
+
1486
+ # 0.1 align format for control guidance
1487
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1488
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1489
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1490
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1491
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1492
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1493
+ control_guidance_start, control_guidance_end = (
1494
+ mult * [control_guidance_start],
1495
+ mult * [control_guidance_end],
1496
+ )
1497
+
1498
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1499
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1500
+ if isinstance(adapter, MultiAdapter) and isinstance(adapter_conditioning_scale, float):
1501
+ adapter_conditioning_scale = [adapter_conditioning_scale] * len(adapter.nets)
1502
+
1503
+ # 1. Check inputs. Raise error if not correct
1504
+ self.check_inputs(
1505
+ prompt,
1506
+ prompt_2,
1507
+ height,
1508
+ width,
1509
+ callback_steps,
1510
+ negative_prompt=negative_prompt,
1511
+ negative_prompt_2=negative_prompt_2,
1512
+ prompt_embeds=prompt_embeds,
1513
+ negative_prompt_embeds=negative_prompt_embeds,
1514
+ pooled_prompt_embeds=pooled_prompt_embeds,
1515
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1516
+ )
1517
+
1518
+ self.check_conditions(
1519
+ prompt,
1520
+ prompt_embeds,
1521
+ adapter_image,
1522
+ control_image,
1523
+ adapter_conditioning_scale,
1524
+ controlnet_conditioning_scale,
1525
+ control_guidance_start,
1526
+ control_guidance_end,
1527
+ )
1528
+
1529
+ # 2. Define call parameters
1530
+ if prompt is not None and isinstance(prompt, str):
1531
+ batch_size = 1
1532
+ elif prompt is not None and isinstance(prompt, list):
1533
+ batch_size = len(prompt)
1534
+ else:
1535
+ batch_size = prompt_embeds.shape[0]
1536
+
1537
+ device = self._execution_device
1538
+
1539
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1540
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1541
+ # corresponds to doing no classifier free guidance.
1542
+ do_classifier_free_guidance = guidance_scale > 1.0
1543
+
1544
+ # 3. Encode input prompt
1545
+ (
1546
+ prompt_embeds,
1547
+ negative_prompt_embeds,
1548
+ pooled_prompt_embeds,
1549
+ negative_pooled_prompt_embeds,
1550
+ ) = self.encode_prompt(
1551
+ prompt=prompt,
1552
+ prompt_2=prompt_2,
1553
+ device=device,
1554
+ num_images_per_prompt=num_images_per_prompt,
1555
+ do_classifier_free_guidance=do_classifier_free_guidance,
1556
+ negative_prompt=negative_prompt,
1557
+ negative_prompt_2=negative_prompt_2,
1558
+ prompt_embeds=prompt_embeds,
1559
+ negative_prompt_embeds=negative_prompt_embeds,
1560
+ pooled_prompt_embeds=pooled_prompt_embeds,
1561
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1562
+ )
1563
+
1564
+ # 4. set timesteps
1565
+ def denoising_value_valid(dnv):
1566
+ return isinstance(dnv, float) and 0 < dnv < 1
1567
+
1568
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1569
+ timesteps, num_inference_steps = self.get_timesteps(
1570
+ num_inference_steps,
1571
+ strength,
1572
+ device,
1573
+ denoising_start=denoising_start if denoising_value_valid(denoising_start) else None,
1574
+ )
1575
+ # check that number of inference steps is not < 1 - as this doesn't make sense
1576
+ if num_inference_steps < 1:
1577
+ raise ValueError(
1578
+ f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
1579
+ f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
1580
+ )
1581
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
1582
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1583
+ # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
1584
+ is_strength_max = strength == 1.0
1585
+
1586
+ # 5. Preprocess mask and image - resizes image and mask w.r.t height and width
1587
+ mask, masked_image, init_image = prepare_mask_and_masked_image(
1588
+ image, mask_image, height, width, return_image=True
1589
+ )
1590
+
1591
+ # 6. Prepare latent variables
1592
+ num_channels_latents = self.vae.config.latent_channels
1593
+ num_channels_unet = self.unet.config.in_channels
1594
+ return_image_latents = num_channels_unet == 4
1595
+
1596
+ add_noise = denoising_start is None
1597
+ latents_outputs = self.prepare_latents(
1598
+ batch_size * num_images_per_prompt,
1599
+ num_channels_latents,
1600
+ height,
1601
+ width,
1602
+ prompt_embeds.dtype,
1603
+ device,
1604
+ generator,
1605
+ latents,
1606
+ image=init_image,
1607
+ timestep=latent_timestep,
1608
+ is_strength_max=is_strength_max,
1609
+ add_noise=add_noise,
1610
+ return_noise=True,
1611
+ return_image_latents=return_image_latents,
1612
+ )
1613
+
1614
+ if return_image_latents:
1615
+ latents, noise, image_latents = latents_outputs
1616
+ else:
1617
+ latents, noise = latents_outputs
1618
+
1619
+ # 7. Prepare mask latent variables
1620
+ mask, masked_image_latents = self.prepare_mask_latents(
1621
+ mask,
1622
+ masked_image,
1623
+ batch_size * num_images_per_prompt,
1624
+ height,
1625
+ width,
1626
+ prompt_embeds.dtype,
1627
+ device,
1628
+ generator,
1629
+ do_classifier_free_guidance,
1630
+ )
1631
+
1632
+ # 8. Check that sizes of mask, masked image and latents match
1633
+ if num_channels_unet == 9:
1634
+ # default case for runwayml/stable-diffusion-inpainting
1635
+ num_channels_mask = mask.shape[1]
1636
+ num_channels_masked_image = masked_image_latents.shape[1]
1637
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
1638
+ raise ValueError(
1639
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
1640
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
1641
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1642
+ f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
1643
+ " `pipeline.unet` or your `mask_image` or `image` input."
1644
+ )
1645
+ elif num_channels_unet != 4:
1646
+ raise ValueError(
1647
+ f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
1648
+ )
1649
+
1650
+ # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1651
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1652
+
1653
+ # 10. Prepare added time ids & embeddings & adapter features
1654
+ if isinstance(adapter, MultiAdapter):
1655
+ adapter_state = adapter(adapter_input, adapter_conditioning_scale)
1656
+ for k, v in enumerate(adapter_state):
1657
+ adapter_state[k] = v
1658
+ else:
1659
+ adapter_state = adapter(adapter_input)
1660
+ for k, v in enumerate(adapter_state):
1661
+ adapter_state[k] = v * adapter_conditioning_scale
1662
+ if num_images_per_prompt > 1:
1663
+ for k, v in enumerate(adapter_state):
1664
+ adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
1665
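+ # duplicate the adapter features so they also cover the unconditional batch when doing classifier-free guidance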
+ if do_classifier_free_guidance:
1666
+ for k, v in enumerate(adapter_state):
1667
+ adapter_state[k] = torch.cat([v] * 2, dim=0)
1668
+
1669
+ # 10.2 Prepare control images
1670
+ if isinstance(controlnet, ControlNetModel):
1671
+ control_image = self.prepare_control_image(
1672
+ image=control_image,
1673
+ width=width,
1674
+ height=height,
1675
+ batch_size=batch_size * num_images_per_prompt,
1676
+ num_images_per_prompt=num_images_per_prompt,
1677
+ device=device,
1678
+ dtype=controlnet.dtype,
1679
+ do_classifier_free_guidance=do_classifier_free_guidance,
1680
+ guess_mode=guess_mode,
1681
+ )
1682
+ elif isinstance(controlnet, MultiControlNetModel):
1683
+ control_images = []
1684
+
1685
+ for control_image_ in control_image:
1686
+ control_image_ = self.prepare_control_image(
1687
+ image=control_image_,
1688
+ width=width,
1689
+ height=height,
1690
+ batch_size=batch_size * num_images_per_prompt,
1691
+ num_images_per_prompt=num_images_per_prompt,
1692
+ device=device,
1693
+ dtype=controlnet.dtype,
1694
+ do_classifier_free_guidance=do_classifier_free_guidance,
1695
+ guess_mode=guess_mode,
1696
+ )
1697
+
1698
+ control_images.append(control_image_)
1699
+
1700
+ control_image = control_images
1701
+ else:
1702
+ raise ValueError(f"{controlnet.__class__} is not supported.")
1703
+
1704
+ # 8.2 Create tensor stating which controlnets to keep
1705
+ controlnet_keep = []
1706
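+ # keeps[j] is 1.0 while step i falls inside [control_guidance_start[j], control_guidance_end[j]] and 0.0 otherwise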
+ for i in range(len(timesteps)):
1707
+ keeps = [
1708
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1709
+ for s, e in zip(control_guidance_start, control_guidance_end)
1710
+ ]
1711
+ if isinstance(self.controlnet, MultiControlNetModel):
1712
+ controlnet_keep.append(keeps)
1713
+ else:
1714
+ controlnet_keep.append(keeps[0])
1715
+ # ----------------------------------------------------------------
1716
+
1717
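+ # SDXL micro-conditioning: pooled text embeddings plus time ids built from size, crop and aesthetic-score inputs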
+ add_text_embeds = pooled_prompt_embeds
1718
+ if self.text_encoder_2 is None:
1719
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1720
+ else:
1721
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1722
+
1723
+ add_time_ids, add_neg_time_ids = self._get_add_time_ids(
1724
+ original_size,
1725
+ crops_coords_top_left,
1726
+ target_size,
1727
+ aesthetic_score,
1728
+ negative_aesthetic_score,
1729
+ dtype=prompt_embeds.dtype,
1730
+ text_encoder_projection_dim=text_encoder_projection_dim,
1731
+ )
1732
+ add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1733
+
1734
+ if do_classifier_free_guidance:
1735
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1736
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1737
+ add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1738
+ add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
1739
+
1740
+ prompt_embeds = prompt_embeds.to(device)
1741
+ add_text_embeds = add_text_embeds.to(device)
1742
+ add_time_ids = add_time_ids.to(device)
1743
+
1744
+ # 11. Denoising loop
1745
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1746
+
1747
+ # 11.1 Apply denoising_end
1748
+ if (
1749
+ denoising_end is not None
1750
+ and denoising_start is not None
1751
+ and denoising_value_valid(denoising_end)
1752
+ and denoising_value_valid(denoising_start)
1753
+ and denoising_start >= denoising_end
1754
+ ):
1755
+ raise ValueError(
1756
+ f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
1757
+ + f" {denoising_end} when using type float."
1758
+ )
1759
+ elif denoising_end is not None and denoising_value_valid(denoising_end):
1760
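+ # truncate the timestep schedule so denoising stops after the requested denoising_end fraction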
+ discrete_timestep_cutoff = int(
1761
+ round(
1762
+ self.scheduler.config.num_train_timesteps
1763
+ - (denoising_end * self.scheduler.config.num_train_timesteps)
1764
+ )
1765
+ )
1766
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1767
+ timesteps = timesteps[:num_inference_steps]
1768
+
1769
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1770
+ for i, t in enumerate(timesteps):
1771
+ # expand the latents if we are doing classifier free guidance
1772
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1773
+
1774
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1775
+
1776
+ if num_channels_unet == 9:
1777
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
1778
+
1779
+ # predict the noise residual
1780
+ added_cond_kwargs = {
1781
+ "text_embeds": add_text_embeds,
1782
+ "time_ids": add_time_ids,
1783
+ }
1784
+
1785
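+ # feed the T2I-Adapter residuals to the UNet only for the first cond_tau fraction of the denoising steps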
+ if i < int(num_inference_steps * cond_tau):
1786
+ down_block_additional_residuals = [state.clone() for state in adapter_state]
1787
+ else:
1788
+ down_block_additional_residuals = None
1789
+
1790
+ # ----------- ControlNet
1791
+
1792
+ # expand the latents if we are doing classifier free guidance
1793
+ latent_model_input_controlnet = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1794
+
1795
+ # concat latents, mask, masked_image_latents in the channel dimension
1796
+ latent_model_input_controlnet = self.scheduler.scale_model_input(latent_model_input_controlnet, t)
1797
+
1798
+ # controlnet(s) inference
1799
+ if guess_mode and do_classifier_free_guidance:
1800
+ # Infer ControlNet only for the conditional batch.
1801
+ control_model_input = latents
1802
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1803
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1804
+ controlnet_added_cond_kwargs = {
1805
+ "text_embeds": add_text_embeds.chunk(2)[1],
1806
+ "time_ids": add_time_ids.chunk(2)[1],
1807
+ }
1808
+ else:
1809
+ control_model_input = latent_model_input_controlnet
1810
+ controlnet_prompt_embeds = prompt_embeds
1811
+ controlnet_added_cond_kwargs = added_cond_kwargs
1812
+
1813
+ if isinstance(controlnet_keep[i], list):
1814
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1815
+ else:
1816
+ controlnet_cond_scale = controlnet_conditioning_scale
1817
+ if isinstance(controlnet_cond_scale, list):
1818
+ controlnet_cond_scale = controlnet_cond_scale[0]
1819
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1820
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1821
+ control_model_input,
1822
+ t,
1823
+ encoder_hidden_states=controlnet_prompt_embeds,
1824
+ controlnet_cond=control_image,
1825
+ conditioning_scale=cond_scale,
1826
+ guess_mode=guess_mode,
1827
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1828
+ return_dict=False,
1829
+ )
1830
+
1831
+ noise_pred = self.unet(
1832
+ latent_model_input,
1833
+ t,
1834
+ encoder_hidden_states=prompt_embeds,
1835
+ cross_attention_kwargs=cross_attention_kwargs,
1836
+ added_cond_kwargs=added_cond_kwargs,
1837
+ return_dict=False,
1838
+ down_intrablock_additional_residuals=down_block_additional_residuals, # t2iadapter
1839
+ down_block_additional_residuals=down_block_res_samples, # controlnet
1840
+ mid_block_additional_residual=mid_block_res_sample, # controlnet
1841
+ )[0]
1842
+
1843
+ # perform guidance
1844
+ if do_classifier_free_guidance:
1845
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1846
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1847
+
1848
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1849
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1850
+ noise_pred = rescale_noise_cfg(
1851
+ noise_pred,
1852
+ noise_pred_text,
1853
+ guidance_rescale=guidance_rescale,
1854
+ )
1855
+
1856
+ # compute the previous noisy sample x_t -> x_t-1
1857
+ latents = self.scheduler.step(
1858
+ noise_pred,
1859
+ t,
1860
+ latents,
1861
+ **extra_step_kwargs,
1862
+ return_dict=False,
1863
+ )[0]
1864
+
1865
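+ # for a standard 4-channel UNet, blend the denoised latents with re-noised image latents so unmasked regions keep the original image content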
+ if num_channels_unet == 4:
1866
+ init_latents_proper = image_latents
1867
+ if do_classifier_free_guidance:
1868
+ init_mask, _ = mask.chunk(2)
1869
+ else:
1870
+ init_mask = mask
1871
+
1872
+ if i < len(timesteps) - 1:
1873
+ noise_timestep = timesteps[i + 1]
1874
+ init_latents_proper = self.scheduler.add_noise(
1875
+ init_latents_proper,
1876
+ noise,
1877
+ torch.tensor([noise_timestep]),
1878
+ )
1879
+
1880
+ latents = (1 - init_mask) * init_latents_proper + init_mask * latents
1881
+
1882
+ # call the callback, if provided
1883
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1884
+ progress_bar.update()
1885
+ if callback is not None and i % callback_steps == 0:
1886
+ callback(i, t, latents)
1887
+
1888
+ # make sure the VAE is in float32 mode, as it overflows in float16
1889
+ if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
1890
+ self.upcast_vae()
1891
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1892
+
1893
+ if output_type != "latent":
1894
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1895
+ else:
1896
+ image = latents
1897
+ return StableDiffusionXLPipelineOutput(images=image)
1898
+
1899
+ image = self.image_processor.postprocess(image, output_type=output_type)
1900
+
1901
+ # Offload last model to CPU
1902
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1903
+ self.final_offload_hook.offload()
1904
+
1905
+ if not return_dict:
1906
+ return (image,)
1907
+
1908
+ return StableDiffusionXLPipelineOutput(images=image)
v0.24.0/pipeline_zero1to3.py ADDED
@@ -0,0 +1,893 @@
 
1
+ # A diffusers implementation of Zero1to3 (https://github.com/cvlab-columbia/zero123), ICCV 2023
2
+ # by Xin Kong
3
+
4
+ import inspect
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+
7
+ import kornia
8
+ import numpy as np
9
+ import PIL.Image
10
+ import torch
11
+ from packaging import version
12
+ from transformers import CLIPFeatureExtractor, CLIPVisionModelWithProjection
13
+
14
+ # from ...configuration_utils import FrozenDict
15
+ # from ...models import AutoencoderKL, UNet2DConditionModel
16
+ # from ...schedulers import KarrasDiffusionSchedulers
17
+ # from ...utils import (
18
+ # deprecate,
19
+ # is_accelerate_available,
20
+ # is_accelerate_version,
21
+ # logging,
22
+ # randn_tensor,
23
+ # replace_example_docstring,
24
+ # )
25
+ # from ..pipeline_utils import DiffusionPipeline
26
+ # from . import StableDiffusionPipelineOutput
27
+ # from .safety_checker import StableDiffusionSafetyChecker
28
+ from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel
29
+ from diffusers.configuration_utils import ConfigMixin, FrozenDict
30
+ from diffusers.models.modeling_utils import ModelMixin
31
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
32
+ from diffusers.schedulers import KarrasDiffusionSchedulers
33
+ from diffusers.utils import (
34
+ deprecate,
35
+ is_accelerate_available,
36
+ is_accelerate_version,
37
+ logging,
38
+ replace_example_docstring,
39
+ )
40
+ from diffusers.utils.torch_utils import randn_tensor
41
+
42
+
43
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
44
+ # todo
45
+ EXAMPLE_DOC_STRING = """
46
+ Examples:
47
+ ```py
48
+ >>> import torch
49
+ >>> from diffusers import StableDiffusionPipeline
50
+
51
+ >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
52
+ >>> pipe = pipe.to("cuda")
53
+
54
+ >>> prompt = "a photo of an astronaut riding a horse on mars"
55
+ >>> image = pipe(prompt).images[0]
56
+ ```
57
+ """
58
+
59
+
60
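+ # projects the concatenated CLIP image embedding and 4-d pose embedding (772 dims total) back to the 768-d CLIP feature size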
+ class CCProjection(ModelMixin, ConfigMixin):
61
+ def __init__(self, in_channel=772, out_channel=768):
62
+ super().__init__()
63
+ self.in_channel = in_channel
64
+ self.out_channel = out_channel
65
+ self.projection = torch.nn.Linear(in_channel, out_channel)
66
+
67
+ def forward(self, x):
68
+ return self.projection(x)
69
+
70
+
71
+ class Zero1to3StableDiffusionPipeline(DiffusionPipeline):
72
+ r"""
73
+ Pipeline for single view conditioned novel view generation using Zero1to3.
74
+
75
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
76
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
77
+
78
+ Args:
79
+ vae ([`AutoencoderKL`]):
80
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
81
+ image_encoder ([`CLIPVisionModelWithProjection`]):
82
+ Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of
83
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection),
84
+ specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
85
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
86
+ scheduler ([`SchedulerMixin`]):
87
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
88
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
89
+ safety_checker ([`StableDiffusionSafetyChecker`]):
90
+ Classification module that estimates whether generated images could be considered offensive or harmful.
91
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
92
+ feature_extractor ([`CLIPFeatureExtractor`]):
93
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
94
+ cc_projection ([`CCProjection`]):
95
+ Projection layer to project the concatenated CLIP features and pose embeddings to the original CLIP feature size.
96
+ """
97
+
98
+ _optional_components = ["safety_checker", "feature_extractor"]
99
+
100
+ def __init__(
101
+ self,
102
+ vae: AutoencoderKL,
103
+ image_encoder: CLIPVisionModelWithProjection,
104
+ unet: UNet2DConditionModel,
105
+ scheduler: KarrasDiffusionSchedulers,
106
+ safety_checker: StableDiffusionSafetyChecker,
107
+ feature_extractor: CLIPFeatureExtractor,
108
+ cc_projection: CCProjection,
109
+ requires_safety_checker: bool = True,
110
+ ):
111
+ super().__init__()
112
+
113
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
114
+ deprecation_message = (
115
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
116
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
117
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
118
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
119
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
120
+ " file"
121
+ )
122
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
123
+ new_config = dict(scheduler.config)
124
+ new_config["steps_offset"] = 1
125
+ scheduler._internal_dict = FrozenDict(new_config)
126
+
127
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
128
+ deprecation_message = (
129
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
130
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
131
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
132
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
133
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
134
+ )
135
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
136
+ new_config = dict(scheduler.config)
137
+ new_config["clip_sample"] = False
138
+ scheduler._internal_dict = FrozenDict(new_config)
139
+
140
+ if safety_checker is None and requires_safety_checker:
141
+ logger.warning(
142
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
143
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
144
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
145
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
146
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
147
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
148
+ )
149
+
150
+ if safety_checker is not None and feature_extractor is None:
151
+ raise ValueError(
152
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
153
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
154
+ )
155
+
156
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
157
+ version.parse(unet.config._diffusers_version).base_version
158
+ ) < version.parse("0.9.0.dev0")
159
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
160
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
161
+ deprecation_message = (
162
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
163
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
164
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
165
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
166
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
167
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
168
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
169
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
170
+ " the `unet/config.json` file"
171
+ )
172
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
173
+ new_config = dict(unet.config)
174
+ new_config["sample_size"] = 64
175
+ unet._internal_dict = FrozenDict(new_config)
176
+
177
+ self.register_modules(
178
+ vae=vae,
179
+ image_encoder=image_encoder,
180
+ unet=unet,
181
+ scheduler=scheduler,
182
+ safety_checker=safety_checker,
183
+ feature_extractor=feature_extractor,
184
+ cc_projection=cc_projection,
185
+ )
186
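+ # spatial downscaling factor of the VAE, i.e. 2 ** (number of resolution levels - 1)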
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
187
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
188
+ # self.model_mode = None
189
+
190
+ def enable_vae_slicing(self):
191
+ r"""
192
+ Enable sliced VAE decoding.
193
+
194
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
195
+ steps. This is useful to save some memory and allow larger batch sizes.
196
+ """
197
+ self.vae.enable_slicing()
198
+
199
+ def disable_vae_slicing(self):
200
+ r"""
201
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
202
+ computing decoding in one step.
203
+ """
204
+ self.vae.disable_slicing()
205
+
206
+ def enable_vae_tiling(self):
207
+ r"""
208
+ Enable tiled VAE decoding.
209
+
210
+ When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
211
+ several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
212
+ """
213
+ self.vae.enable_tiling()
214
+
215
+ def disable_vae_tiling(self):
216
+ r"""
217
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
218
+ computing decoding in one step.
219
+ """
220
+ self.vae.disable_tiling()
221
+
222
+ def enable_sequential_cpu_offload(self, gpu_id=0):
223
+ r"""
224
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
225
+ image_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
226
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
227
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
228
+ `enable_model_cpu_offload`, but performance is lower.
229
+ """
230
+ if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
231
+ from accelerate import cpu_offload
232
+ else:
233
+ raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
234
+
235
+ device = torch.device(f"cuda:{gpu_id}")
236
+
237
+ if self.device.type != "cpu":
238
+ self.to("cpu", silence_dtype_warnings=True)
239
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
240
+
241
+ for cpu_offloaded_model in [self.unet, self.image_encoder, self.vae]:
242
+ cpu_offload(cpu_offloaded_model, device)
243
+
244
+ if self.safety_checker is not None:
245
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
246
+
247
+ def enable_model_cpu_offload(self, gpu_id=0):
248
+ r"""
249
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
250
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
251
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
252
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
253
+ """
254
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
255
+ from accelerate import cpu_offload_with_hook
256
+ else:
257
+ raise ImportError("`enable_model_offload` requires `accelerate v0.17.0` or higher.")
258
+
259
+ device = torch.device(f"cuda:{gpu_id}")
260
+
261
+ if self.device.type != "cpu":
262
+ self.to("cpu", silence_dtype_warnings=True)
263
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
264
+
265
+ hook = None
266
+ for cpu_offloaded_model in [self.image_encoder, self.unet, self.vae]:
267
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
268
+
269
+ if self.safety_checker is not None:
270
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
271
+
272
+ # We'll offload the last model manually.
273
+ self.final_offload_hook = hook
274
+
275
+ @property
276
+ def _execution_device(self):
277
+ r"""
278
+ Returns the device on which the pipeline's models will be executed. After calling
279
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
280
+ hooks.
281
+ """
282
+ if not hasattr(self.unet, "_hf_hook"):
283
+ return self.device
284
+ for module in self.unet.modules():
285
+ if (
286
+ hasattr(module, "_hf_hook")
287
+ and hasattr(module._hf_hook, "execution_device")
288
+ and module._hf_hook.execution_device is not None
289
+ ):
290
+ return torch.device(module._hf_hook.execution_device)
291
+ return self.device
292
+
293
+ def _encode_prompt(
294
+ self,
295
+ prompt,
296
+ device,
297
+ num_images_per_prompt,
298
+ do_classifier_free_guidance,
299
+ negative_prompt=None,
300
+ prompt_embeds: Optional[torch.FloatTensor] = None,
301
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
302
+ ):
303
+ r"""
304
+ Encodes the prompt into text encoder hidden states.
305
+
306
+ Args:
307
+ prompt (`str` or `List[str]`, *optional*):
308
+ prompt to be encoded
309
+ device: (`torch.device`):
310
+ torch device
311
+ num_images_per_prompt (`int`):
312
+ number of images that should be generated per prompt
313
+ do_classifier_free_guidance (`bool`):
314
+ whether to use classifier free guidance or not
315
+ negative_prompt (`str` or `List[str]`, *optional*):
316
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
317
+ `negative_prompt_embeds` instead.
318
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
319
+ prompt_embeds (`torch.FloatTensor`, *optional*):
320
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
321
+ provided, text embeddings will be generated from `prompt` input argument.
322
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
323
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
324
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
325
+ argument.
326
+ """
327
+ if prompt is not None and isinstance(prompt, str):
328
+ batch_size = 1
329
+ elif prompt is not None and isinstance(prompt, list):
330
+ batch_size = len(prompt)
331
+ else:
332
+ batch_size = prompt_embeds.shape[0]
333
+
334
+ if prompt_embeds is None:
335
+ text_inputs = self.tokenizer(
336
+ prompt,
337
+ padding="max_length",
338
+ max_length=self.tokenizer.model_max_length,
339
+ truncation=True,
340
+ return_tensors="pt",
341
+ )
342
+ text_input_ids = text_inputs.input_ids
343
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
344
+
345
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
346
+ text_input_ids, untruncated_ids
347
+ ):
348
+ removed_text = self.tokenizer.batch_decode(
349
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
350
+ )
351
+ logger.warning(
352
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
353
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
354
+ )
355
+
356
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
357
+ attention_mask = text_inputs.attention_mask.to(device)
358
+ else:
359
+ attention_mask = None
360
+
361
+ prompt_embeds = self.text_encoder(
362
+ text_input_ids.to(device),
363
+ attention_mask=attention_mask,
364
+ )
365
+ prompt_embeds = prompt_embeds[0]
366
+
367
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
368
+
369
+ bs_embed, seq_len, _ = prompt_embeds.shape
370
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
371
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
372
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
373
+
374
+ # get unconditional embeddings for classifier free guidance
375
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
376
+ uncond_tokens: List[str]
377
+ if negative_prompt is None:
378
+ uncond_tokens = [""] * batch_size
379
+ elif type(prompt) is not type(negative_prompt):
380
+ raise TypeError(
381
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
382
+ f" {type(prompt)}."
383
+ )
384
+ elif isinstance(negative_prompt, str):
385
+ uncond_tokens = [negative_prompt]
386
+ elif batch_size != len(negative_prompt):
387
+ raise ValueError(
388
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
389
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
390
+ " the batch size of `prompt`."
391
+ )
392
+ else:
393
+ uncond_tokens = negative_prompt
394
+
395
+ max_length = prompt_embeds.shape[1]
396
+ uncond_input = self.tokenizer(
397
+ uncond_tokens,
398
+ padding="max_length",
399
+ max_length=max_length,
400
+ truncation=True,
401
+ return_tensors="pt",
402
+ )
403
+
404
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
405
+ attention_mask = uncond_input.attention_mask.to(device)
406
+ else:
407
+ attention_mask = None
408
+
409
+ negative_prompt_embeds = self.text_encoder(
410
+ uncond_input.input_ids.to(device),
411
+ attention_mask=attention_mask,
412
+ )
413
+ negative_prompt_embeds = negative_prompt_embeds[0]
414
+
415
+ if do_classifier_free_guidance:
416
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
417
+ seq_len = negative_prompt_embeds.shape[1]
418
+
419
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
420
+
421
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
422
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
423
+
424
+ # For classifier free guidance, we need to do two forward passes.
425
+ # Here we concatenate the unconditional and text embeddings into a single batch
426
+ # to avoid doing two forward passes
427
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
428
+
429
+ return prompt_embeds
430
+
431
+ def CLIP_preprocess(self, x):
432
+ dtype = x.dtype
433
+ # following openai's implementation
434
+ # TODO HF OpenAI CLIP preprocessing issue https://github.com/huggingface/transformers/issues/22505#issuecomment-1650170741
435
+ # follow openai preprocessing to keep exact same, input tensor [-1, 1], otherwise the preprocessing will be different, https://github.com/huggingface/transformers/pull/22608
436
+ if isinstance(x, torch.Tensor):
437
+ if x.min() < -1.0 or x.max() > 1.0:
438
+ raise ValueError("Expected input tensor to have values in the range [-1, 1]")
439
+ x = kornia.geometry.resize(
440
+ x.to(torch.float32), (224, 224), interpolation="bicubic", align_corners=True, antialias=False
441
+ ).to(dtype=dtype)
442
+ x = (x + 1.0) / 2.0
443
+ # renormalize according to clip
444
+ x = kornia.enhance.normalize(
445
+ x, torch.Tensor([0.48145466, 0.4578275, 0.40821073]), torch.Tensor([0.26862954, 0.26130258, 0.27577711])
446
+ )
447
+ return x
448
+
449
+ # from image_variation
450
+ def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
451
+ dtype = next(self.image_encoder.parameters()).dtype
452
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
453
+ raise ValueError(
454
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
455
+ )
456
+
457
+ if isinstance(image, torch.Tensor):
458
+ # Batch single image
459
+ if image.ndim == 3:
460
+ assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
461
+ image = image.unsqueeze(0)
462
+
463
+ assert image.ndim == 4, "Image must have 4 dimensions"
464
+
465
+ # Check image is in [-1, 1]
466
+ if image.min() < -1 or image.max() > 1:
467
+ raise ValueError("Image should be in [-1, 1] range")
468
+ else:
469
+ # preprocess image
470
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
471
+ image = [image]
472
+
473
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
474
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
475
+ image = np.concatenate(image, axis=0)
476
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
477
+ image = np.concatenate([i[None, :] for i in image], axis=0)
478
+
479
+ image = image.transpose(0, 3, 1, 2)
480
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
481
+
482
+ image = image.to(device=device, dtype=dtype)
483
+
484
+ image = self.CLIP_preprocess(image)
485
+ # if not isinstance(image, torch.Tensor):
486
+ # # 0-255
487
+ # print("Warning: image is processed by hf's preprocess, which is different from openai original's.")
488
+ # image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
489
+ image_embeddings = self.image_encoder(image).image_embeds.to(dtype=dtype)
490
+ image_embeddings = image_embeddings.unsqueeze(1)
491
+
492
+ # duplicate image embeddings for each generation per prompt, using mps friendly method
493
+ bs_embed, seq_len, _ = image_embeddings.shape
494
+ image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
495
+ image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
496
+
497
+ if do_classifier_free_guidance:
498
+ negative_prompt_embeds = torch.zeros_like(image_embeddings)
499
+
500
+ # For classifier free guidance, we need to do two forward passes.
501
+ # Here we concatenate the unconditional and text embeddings into a single batch
502
+ # to avoid doing two forward passes
503
+ image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])
504
+
505
+ return image_embeddings
506
+
507
+ def _encode_pose(self, pose, device, num_images_per_prompt, do_classifier_free_guidance):
508
+ dtype = next(self.cc_projection.parameters()).dtype
509
+ if isinstance(pose, torch.Tensor):
510
+ pose_embeddings = pose.unsqueeze(1).to(device=device, dtype=dtype)
511
+ else:
512
+ if isinstance(pose[0], list):
513
+ pose = torch.Tensor(pose)
514
+ else:
515
+ pose = torch.Tensor([pose])
516
+ x, y, z = pose[:, 0].unsqueeze(1), pose[:, 1].unsqueeze(1), pose[:, 2].unsqueeze(1)
517
+ pose_embeddings = (
518
+ torch.cat([torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z], dim=-1)
519
+ .unsqueeze(1)
520
+ .to(device=device, dtype=dtype)
521
+ ) # B, 1, 4
522
+ # duplicate pose embeddings for each generation per prompt, using mps friendly method
523
+ bs_embed, seq_len, _ = pose_embeddings.shape
524
+ pose_embeddings = pose_embeddings.repeat(1, num_images_per_prompt, 1)
525
+ pose_embeddings = pose_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
526
+ if do_classifier_free_guidance:
527
+ negative_prompt_embeds = torch.zeros_like(pose_embeddings)
528
+
529
+ # For classifier free guidance, we need to do two forward passes.
530
+ # Here we concatenate the unconditional and text embeddings into a single batch
531
+ # to avoid doing two forward passes
532
+ pose_embeddings = torch.cat([negative_prompt_embeds, pose_embeddings])
533
+ return pose_embeddings
534
+
535
+ def _encode_image_with_pose(self, image, pose, device, num_images_per_prompt, do_classifier_free_guidance):
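For readability, a worked example of the 4-dimensional camera conditioning built by `_encode_pose` for a single `(polar, azimuth, radius)` pose. The values are illustrative and only plain Python arithmetic is used:

```python
import math

polar, azimuth, radius = 30.0, 45.0, 0.0  # hypothetical relative camera pose, angles in degrees

cond = [
    math.radians(polar),              # polar angle in radians
    math.sin(math.radians(azimuth)),  # sin of azimuth
    math.cos(math.radians(azimuth)),  # cos of azimuth
    radius,                           # radius offset
]
# -> roughly [0.5236, 0.7071, 0.7071, 0.0]; the pipeline stacks this into a (B, 1, 4)
# tensor, concatenates it with the 768-dim CLIP image embedding, and cc_projection
# maps the resulting 768 + 4 = 772 features back to 768.
```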
536
+ img_prompt_embeds = self._encode_image(image, device, num_images_per_prompt, False)
537
+ pose_prompt_embeds = self._encode_pose(pose, device, num_images_per_prompt, False)
538
+ prompt_embeds = torch.cat([img_prompt_embeds, pose_prompt_embeds], dim=-1)
539
+ prompt_embeds = self.cc_projection(prompt_embeds)
540
+ # prompt_embeds = img_prompt_embeds
541
+ # follow 0123, add negative prompt, after projection
542
+ if do_classifier_free_guidance:
543
+ negative_prompt = torch.zeros_like(prompt_embeds)
544
+ prompt_embeds = torch.cat([negative_prompt, prompt_embeds])
545
+ return prompt_embeds
546
+
547
+ def run_safety_checker(self, image, device, dtype):
548
+ if self.safety_checker is not None:
549
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
550
+ image, has_nsfw_concept = self.safety_checker(
551
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
552
+ )
553
+ else:
554
+ has_nsfw_concept = None
555
+ return image, has_nsfw_concept
556
+
557
+ def decode_latents(self, latents):
558
+ latents = 1 / self.vae.config.scaling_factor * latents
559
+ image = self.vae.decode(latents).sample
560
+ image = (image / 2 + 0.5).clamp(0, 1)
561
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
562
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
563
+ return image
564
+
565
+ def prepare_extra_step_kwargs(self, generator, eta):
566
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
567
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
568
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
569
+ # and should be between [0, 1]
570
+
571
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
572
+ extra_step_kwargs = {}
573
+ if accepts_eta:
574
+ extra_step_kwargs["eta"] = eta
575
+
576
+ # check if the scheduler accepts generator
577
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
578
+ if accepts_generator:
579
+ extra_step_kwargs["generator"] = generator
580
+ return extra_step_kwargs
581
+
582
+ def check_inputs(self, image, height, width, callback_steps):
583
+ if (
584
+ not isinstance(image, torch.Tensor)
585
+ and not isinstance(image, PIL.Image.Image)
586
+ and not isinstance(image, list)
587
+ ):
588
+ raise ValueError(
589
+ "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
590
+ f" {type(image)}"
591
+ )
592
+
593
+ if height % 8 != 0 or width % 8 != 0:
594
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
595
+
596
+ if (callback_steps is None) or (
597
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
598
+ ):
599
+ raise ValueError(
600
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
601
+ f" {type(callback_steps)}."
602
+ )
603
+
604
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
605
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
606
+ if isinstance(generator, list) and len(generator) != batch_size:
607
+ raise ValueError(
608
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
609
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
610
+ )
611
+
612
+ if latents is None:
613
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
614
+ else:
615
+ latents = latents.to(device)
616
+
617
+ # scale the initial noise by the standard deviation required by the scheduler
618
+ latents = latents * self.scheduler.init_noise_sigma
619
+ return latents
620
+
621
+ def prepare_img_latents(self, image, batch_size, dtype, device, generator=None, do_classifier_free_guidance=False):
622
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
623
+ raise ValueError(
624
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
625
+ )
626
+
627
+ if isinstance(image, torch.Tensor):
628
+ # Batch single image
629
+ if image.ndim == 3:
630
+ assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
631
+ image = image.unsqueeze(0)
632
+
633
+ assert image.ndim == 4, "Image must have 4 dimensions"
634
+
635
+ # Check image is in [-1, 1]
636
+ if image.min() < -1 or image.max() > 1:
637
+ raise ValueError("Image should be in [-1, 1] range")
638
+ else:
639
+ # preprocess image
640
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
641
+ image = [image]
642
+
643
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
644
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
645
+ image = np.concatenate(image, axis=0)
646
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
647
+ image = np.concatenate([i[None, :] for i in image], axis=0)
648
+
649
+ image = image.transpose(0, 3, 1, 2)
650
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
651
+
652
+ image = image.to(device=device, dtype=dtype)
653
+
654
+ if isinstance(generator, list) and len(generator) != batch_size:
655
+ raise ValueError(
656
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
657
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
658
+ )
659
+
660
+ if isinstance(generator, list):
661
+ init_latents = [
662
+ self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i])
663
+ for i in range(batch_size) # sample
664
+ ]
665
+ init_latents = torch.cat(init_latents, dim=0)
666
+ else:
667
+ init_latents = self.vae.encode(image).latent_dist.mode()
668
+
669
+ # init_latents = self.vae.config.scaling_factor * init_latents # todo in original zero123's inference gradio_new.py, model.encode_first_stage() is not scaled by scaling_factor
670
+ if batch_size > init_latents.shape[0]:
671
+ # init_latents = init_latents.repeat(batch_size // init_latents.shape[0], 1, 1, 1)
672
+ num_images_per_prompt = batch_size // init_latents.shape[0]
673
+ # duplicate image latents for each generation per prompt, using mps friendly method
674
+ bs_embed, emb_c, emb_h, emb_w = init_latents.shape
675
+ init_latents = init_latents.unsqueeze(1)
676
+ init_latents = init_latents.repeat(1, num_images_per_prompt, 1, 1, 1)
677
+ init_latents = init_latents.view(bs_embed * num_images_per_prompt, emb_c, emb_h, emb_w)
678
+
679
+ # init_latents = torch.cat([init_latents]*2) if do_classifier_free_guidance else init_latents # follow zero123
680
+ init_latents = (
681
+ torch.cat([torch.zeros_like(init_latents), init_latents]) if do_classifier_free_guidance else init_latents
682
+ )
683
+
684
+ init_latents = init_latents.to(device=device, dtype=dtype)
685
+ return init_latents
686
+
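The latents returned by `prepare_img_latents` are not noised; in the denoising loop below they are concatenated channel-wise with the noise latents, so the UNet receives an 8-channel input (4 noise channels plus 4 clean image-latent channels). A shape-only sketch, assuming 256x256 inputs and hence 32x32 latents:

```python
import torch

latents = torch.randn(2, 4, 32, 32)      # noise latents (batch already doubled for CFG)
img_latents = torch.randn(2, 4, 32, 32)  # stand-in for prepare_img_latents output
unet_input = torch.cat([latents, img_latents], dim=1)
print(unet_input.shape)                  # torch.Size([2, 8, 32, 32])
```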
687
+ # def load_cc_projection(self, pretrained_weights=None):
688
+ # self.cc_projection = torch.nn.Linear(772, 768)
689
+ # torch.nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])
690
+ # torch.nn.init.zeros_(list(self.cc_projection.parameters())[1])
691
+ # if pretrained_weights is not None:
692
+ # self.cc_projection.load_state_dict(pretrained_weights)
693
+
694
+ @torch.no_grad()
695
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
696
+ def __call__(
697
+ self,
698
+ input_imgs: Union[torch.FloatTensor, PIL.Image.Image] = None,
699
+ prompt_imgs: Union[torch.FloatTensor, PIL.Image.Image] = None,
700
+ poses: Union[List[float], List[List[float]]] = None,
701
+ torch_dtype=torch.float32,
702
+ height: Optional[int] = None,
703
+ width: Optional[int] = None,
704
+ num_inference_steps: int = 50,
705
+ guidance_scale: float = 3.0,
706
+ negative_prompt: Optional[Union[str, List[str]]] = None,
707
+ num_images_per_prompt: Optional[int] = 1,
708
+ eta: float = 0.0,
709
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
710
+ latents: Optional[torch.FloatTensor] = None,
711
+ prompt_embeds: Optional[torch.FloatTensor] = None,
712
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
713
+ output_type: Optional[str] = "pil",
714
+ return_dict: bool = True,
715
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
716
+ callback_steps: int = 1,
717
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
718
+ controlnet_conditioning_scale: float = 1.0,
719
+ ):
720
+ r"""
721
+ Function invoked when calling the pipeline for generation.
722
+
723
+ Args:
724
+ input_imgs (`PIL` or `List[PIL]`, *optional*):
725
+ The single input image for each 3D object
726
+ prompt_imgs (`PIL` or `List[PIL]`, *optional*):
727
+ Same as `input_imgs`, but used later as the image prompt condition, encoded by the CLIP image encoder
728
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
729
+ The height in pixels of the generated image.
730
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
731
+ The width in pixels of the generated image.
732
+ num_inference_steps (`int`, *optional*, defaults to 50):
733
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
734
+ expense of slower inference.
735
+ guidance_scale (`float`, *optional*, defaults to 3.0):
736
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
737
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
738
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
739
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
740
+ usually at the expense of lower image quality.
741
+ negative_prompt (`str` or `List[str]`, *optional*):
742
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
743
+ `negative_prompt_embeds` instead.
744
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
745
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
746
+ The number of images to generate per prompt.
747
+ eta (`float`, *optional*, defaults to 0.0):
748
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
749
+ [`schedulers.DDIMScheduler`], will be ignored for others.
750
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
751
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
752
+ to make generation deterministic.
753
+ latents (`torch.FloatTensor`, *optional*):
754
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
755
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
756
+ tensor will be generated by sampling using the supplied random `generator`.
757
+ prompt_embeds (`torch.FloatTensor`, *optional*):
758
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
759
+ provided, text embeddings will be generated from `prompt` input argument.
760
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
761
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
762
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
763
+ argument.
764
+ output_type (`str`, *optional*, defaults to `"pil"`):
765
+ The output format of the generated image. Choose between
766
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
767
+ return_dict (`bool`, *optional*, defaults to `True`):
768
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
769
+ plain tuple.
770
+ callback (`Callable`, *optional*):
771
+ A function that will be called every `callback_steps` steps during inference. The function will be
772
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
773
+ callback_steps (`int`, *optional*, defaults to 1):
774
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
775
+ called at every step.
776
+ cross_attention_kwargs (`dict`, *optional*):
777
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
778
+ `self.processor` in
779
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
780
+
781
+ Examples:
782
+
783
+ Returns:
784
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
785
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
786
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
787
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
788
+ (nsfw) content, according to the `safety_checker`.
789
+ """
790
+ # 0. Default height and width to unet
791
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
792
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
793
+
794
+ # 1. Check inputs. Raise error if not correct
795
+ # input_image = hint_imgs
796
+ self.check_inputs(input_imgs, height, width, callback_steps)
797
+
798
+ # 2. Define call parameters
799
+ if isinstance(input_imgs, PIL.Image.Image):
800
+ batch_size = 1
801
+ elif isinstance(input_imgs, list):
802
+ batch_size = len(input_imgs)
803
+ else:
804
+ batch_size = input_imgs.shape[0]
805
+ device = self._execution_device
806
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
807
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
808
+ # corresponds to doing no classifier free guidance.
809
+ do_classifier_free_guidance = guidance_scale > 1.0
810
+
811
+ # 3. Encode input image with pose as prompt
812
+ prompt_embeds = self._encode_image_with_pose(
813
+ prompt_imgs, poses, device, num_images_per_prompt, do_classifier_free_guidance
814
+ )
815
+
816
+ # 4. Prepare timesteps
817
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
818
+ timesteps = self.scheduler.timesteps
819
+
820
+ # 5. Prepare latent variables
821
+ latents = self.prepare_latents(
822
+ batch_size * num_images_per_prompt,
823
+ 4,
824
+ height,
825
+ width,
826
+ prompt_embeds.dtype,
827
+ device,
828
+ generator,
829
+ latents,
830
+ )
831
+
832
+ # 6. Prepare image latents
833
+ img_latents = self.prepare_img_latents(
834
+ input_imgs,
835
+ batch_size * num_images_per_prompt,
836
+ prompt_embeds.dtype,
837
+ device,
838
+ generator,
839
+ do_classifier_free_guidance,
840
+ )
841
+
842
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
843
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
844
+
845
+ # 7. Denoising loop
846
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
847
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
848
+ for i, t in enumerate(timesteps):
849
+ # expand the latents if we are doing classifier free guidance
850
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
851
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
852
+ latent_model_input = torch.cat([latent_model_input, img_latents], dim=1)
853
+
854
+ # predict the noise residual
855
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
856
+
857
+ # perform guidance
858
+ if do_classifier_free_guidance:
859
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
860
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
861
+
862
+ # compute the previous noisy sample x_t -> x_t-1
863
+ # latents = self.scheduler.step(noise_pred.to(dtype=torch.float32), t, latents.to(dtype=torch.float32)).prev_sample.to(prompt_embeds.dtype)
864
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
865
+
866
+ # call the callback, if provided
867
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
868
+ progress_bar.update()
869
+ if callback is not None and i % callback_steps == 0:
870
+ step_idx = i // getattr(self.scheduler, "order", 1)
871
+ callback(step_idx, t, latents)
872
+
873
+ # 8. Post-processing
874
+ has_nsfw_concept = None
875
+ if output_type == "latent":
876
+ image = latents
877
+ elif output_type == "pil":
878
+ # 8. Post-processing
879
+ image = self.decode_latents(latents)
880
+ # 10. Convert to PIL
881
+ image = self.numpy_to_pil(image)
882
+ else:
883
+ # 8. Post-processing
884
+ image = self.decode_latents(latents)
885
+
886
+ # Offload last model to CPU
887
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
888
+ self.final_offload_hook.offload()
889
+
890
+ if not return_dict:
891
+ return (image, has_nsfw_concept)
892
+
893
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
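A minimal usage sketch for the pipeline above. The checkpoint id, file names, and pose values are placeholders: any Zero-1-to-3 checkpoint converted to the diffusers layout (including the extra `cc_projection` module) would be needed here, and a CUDA device is assumed.

```python
import torch
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "path/or/repo-of-a-zero123-diffusers-checkpoint",  # placeholder
    custom_pipeline="pipeline_zero1to3",
    torch_dtype=torch.float16,
).to("cuda")

cond = Image.open("input_view.png").convert("RGB").resize((256, 256))
# one relative camera pose per input image: (polar, azimuth, radius)
images = pipe(
    input_imgs=cond,
    prompt_imgs=cond,
    poses=[[30.0, 45.0, 0.0]],
    num_inference_steps=50,
    guidance_scale=3.0,
).images
images[0].save("novel_view.png")
```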
v0.24.0/run_onnx_controlnet.py ADDED
@@ -0,0 +1,911 @@
1
+ import argparse
2
+ import inspect
3
+ import os
4
+ import time
5
+ import warnings
6
+ from typing import Any, Callable, Dict, List, Optional, Union
7
+
8
+ import numpy as np
9
+ import PIL.Image
10
+ import torch
11
+ from PIL import Image
12
+ from transformers import CLIPTokenizer
13
+
14
+ from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler
15
+ from diffusers.image_processor import VaeImageProcessor
16
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
17
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
18
+ from diffusers.schedulers import KarrasDiffusionSchedulers
19
+ from diffusers.utils import (
20
+ deprecate,
21
+ logging,
22
+ replace_example_docstring,
23
+ )
24
+ from diffusers.utils.torch_utils import randn_tensor
25
+
26
+
27
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
28
+
29
+
30
+ EXAMPLE_DOC_STRING = """
31
+ Examples:
32
+ ```py
33
+ >>> # !pip install opencv-python transformers accelerate
34
+ >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
35
+ >>> from diffusers.utils import load_image
36
+ >>> import numpy as np
37
+ >>> import torch
38
+
39
+ >>> import cv2
40
+ >>> from PIL import Image
41
+
42
+ >>> # download an image
43
+ >>> image = load_image(
44
+ ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
45
+ ... )
46
+ >>> np_image = np.array(image)
47
+
48
+ >>> # get canny image
49
+ >>> np_image = cv2.Canny(np_image, 100, 200)
50
+ >>> np_image = np_image[:, :, None]
51
+ >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)
52
+ >>> canny_image = Image.fromarray(np_image)
53
+
54
+ >>> # load control net and stable diffusion v1-5
55
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
56
+ >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
57
+ ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
58
+ ... )
59
+
60
+ >>> # speed up diffusion process with faster scheduler and memory optimization
61
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
62
+ >>> pipe.enable_model_cpu_offload()
63
+
64
+ >>> # generate image
65
+ >>> generator = torch.manual_seed(0)
66
+ >>> image = pipe(
67
+ ... "futuristic-looking woman",
68
+ ... num_inference_steps=20,
69
+ ... generator=generator,
70
+ ... image=image,
71
+ ... control_image=canny_image,
72
+ ... ).images[0]
73
+ ```
74
+ """
75
+
76
+
77
+ def prepare_image(image):
78
+ if isinstance(image, torch.Tensor):
79
+ # Batch single image
80
+ if image.ndim == 3:
81
+ image = image.unsqueeze(0)
82
+
83
+ image = image.to(dtype=torch.float32)
84
+ else:
85
+ # preprocess image
86
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
87
+ image = [image]
88
+
89
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
90
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
91
+ image = np.concatenate(image, axis=0)
92
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
93
+ image = np.concatenate([i[None, :] for i in image], axis=0)
94
+
95
+ image = image.transpose(0, 3, 1, 2)
96
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
97
+
98
+ return image
99
+
100
+
101
+ class OnnxStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
102
+ vae_encoder: OnnxRuntimeModel
103
+ vae_decoder: OnnxRuntimeModel
104
+ text_encoder: OnnxRuntimeModel
105
+ tokenizer: CLIPTokenizer
106
+ unet: OnnxRuntimeModel
107
+ scheduler: KarrasDiffusionSchedulers
108
+
109
+ def __init__(
110
+ self,
111
+ vae_encoder: OnnxRuntimeModel,
112
+ vae_decoder: OnnxRuntimeModel,
113
+ text_encoder: OnnxRuntimeModel,
114
+ tokenizer: CLIPTokenizer,
115
+ unet: OnnxRuntimeModel,
116
+ scheduler: KarrasDiffusionSchedulers,
117
+ ):
118
+ super().__init__()
119
+
120
+ self.register_modules(
121
+ vae_encoder=vae_encoder,
122
+ vae_decoder=vae_decoder,
123
+ text_encoder=text_encoder,
124
+ tokenizer=tokenizer,
125
+ unet=unet,
126
+ scheduler=scheduler,
127
+ )
128
+ self.vae_scale_factor = 2 ** (4 - 1)
129
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
130
+ self.control_image_processor = VaeImageProcessor(
131
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
132
+ )
133
+
134
+ def _encode_prompt(
135
+ self,
136
+ prompt: Union[str, List[str]],
137
+ num_images_per_prompt: Optional[int],
138
+ do_classifier_free_guidance: bool,
139
+ negative_prompt: Optional[str],
140
+ prompt_embeds: Optional[np.ndarray] = None,
141
+ negative_prompt_embeds: Optional[np.ndarray] = None,
142
+ ):
143
+ r"""
144
+ Encodes the prompt into text encoder hidden states.
145
+
146
+ Args:
147
+ prompt (`str` or `List[str]`):
148
+ prompt to be encoded
149
+ num_images_per_prompt (`int`):
150
+ number of images that should be generated per prompt
151
+ do_classifier_free_guidance (`bool`):
152
+ whether to use classifier free guidance or not
153
+ negative_prompt (`str` or `List[str]`):
154
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
155
+ if `guidance_scale` is less than `1`).
156
+ prompt_embeds (`np.ndarray`, *optional*):
157
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
158
+ provided, text embeddings will be generated from `prompt` input argument.
159
+ negative_prompt_embeds (`np.ndarray`, *optional*):
160
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
161
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
162
+ argument.
163
+ """
164
+ if prompt is not None and isinstance(prompt, str):
165
+ batch_size = 1
166
+ elif prompt is not None and isinstance(prompt, list):
167
+ batch_size = len(prompt)
168
+ else:
169
+ batch_size = prompt_embeds.shape[0]
170
+
171
+ if prompt_embeds is None:
172
+ # get prompt text embeddings
173
+ text_inputs = self.tokenizer(
174
+ prompt,
175
+ padding="max_length",
176
+ max_length=self.tokenizer.model_max_length,
177
+ truncation=True,
178
+ return_tensors="np",
179
+ )
180
+ text_input_ids = text_inputs.input_ids
181
+ untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
182
+
183
+ if not np.array_equal(text_input_ids, untruncated_ids):
184
+ removed_text = self.tokenizer.batch_decode(
185
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
186
+ )
187
+ logger.warning(
188
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
189
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
190
+ )
191
+
192
+ prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
193
+
194
+ prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
195
+
196
+ # get unconditional embeddings for classifier free guidance
197
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
198
+ uncond_tokens: List[str]
199
+ if negative_prompt is None:
200
+ uncond_tokens = [""] * batch_size
201
+ elif type(prompt) is not type(negative_prompt):
202
+ raise TypeError(
203
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
204
+ f" {type(prompt)}."
205
+ )
206
+ elif isinstance(negative_prompt, str):
207
+ uncond_tokens = [negative_prompt] * batch_size
208
+ elif batch_size != len(negative_prompt):
209
+ raise ValueError(
210
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
211
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
212
+ " the batch size of `prompt`."
213
+ )
214
+ else:
215
+ uncond_tokens = negative_prompt
216
+
217
+ max_length = prompt_embeds.shape[1]
218
+ uncond_input = self.tokenizer(
219
+ uncond_tokens,
220
+ padding="max_length",
221
+ max_length=max_length,
222
+ truncation=True,
223
+ return_tensors="np",
224
+ )
225
+ negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
226
+
227
+ if do_classifier_free_guidance:
228
+ negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
229
+
230
+ # For classifier free guidance, we need to do two forward passes.
231
+ # Here we concatenate the unconditional and text embeddings into a single batch
232
+ # to avoid doing two forward passes
233
+ prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
234
+
235
+ return prompt_embeds
236
+
237
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
238
+ def decode_latents(self, latents):
239
+ warnings.warn(
240
+ "The decode_latents method is deprecated and will be removed in a future version. Please"
241
+ " use VaeImageProcessor instead",
242
+ FutureWarning,
243
+ )
244
+ latents = 1 / self.vae.config.scaling_factor * latents
245
+ image = self.vae.decode(latents, return_dict=False)[0]
246
+ image = (image / 2 + 0.5).clamp(0, 1)
247
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
248
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
249
+ return image
250
+
251
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
252
+ def prepare_extra_step_kwargs(self, generator, eta):
253
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
254
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
255
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
256
+ # and should be between [0, 1]
257
+
258
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
259
+ extra_step_kwargs = {}
260
+ if accepts_eta:
261
+ extra_step_kwargs["eta"] = eta
262
+
263
+ # check if the scheduler accepts generator
264
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
265
+ if accepts_generator:
266
+ extra_step_kwargs["generator"] = generator
267
+ return extra_step_kwargs
268
+
269
+ def check_inputs(
270
+ self,
271
+ num_controlnet,
272
+ prompt,
273
+ image,
274
+ callback_steps,
275
+ negative_prompt=None,
276
+ prompt_embeds=None,
277
+ negative_prompt_embeds=None,
278
+ controlnet_conditioning_scale=1.0,
279
+ control_guidance_start=0.0,
280
+ control_guidance_end=1.0,
281
+ ):
282
+ if (callback_steps is None) or (
283
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
284
+ ):
285
+ raise ValueError(
286
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
287
+ f" {type(callback_steps)}."
288
+ )
289
+
290
+ if prompt is not None and prompt_embeds is not None:
291
+ raise ValueError(
292
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
293
+ " only forward one of the two."
294
+ )
295
+ elif prompt is None and prompt_embeds is None:
296
+ raise ValueError(
297
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
298
+ )
299
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
300
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
301
+
302
+ if negative_prompt is not None and negative_prompt_embeds is not None:
303
+ raise ValueError(
304
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
305
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
306
+ )
307
+
308
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
309
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
310
+ raise ValueError(
311
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
312
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
313
+ f" {negative_prompt_embeds.shape}."
314
+ )
315
+
316
+ # Check `image`
317
+ if num_controlnet == 1:
318
+ self.check_image(image, prompt, prompt_embeds)
319
+ elif num_controlnet > 1:
320
+ if not isinstance(image, list):
321
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
322
+
323
+ # When `image` is a nested list:
324
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
325
+ elif any(isinstance(i, list) for i in image):
326
+ raise ValueError("A single batch of multiple conditionings is supported at the moment.")
327
+ elif len(image) != num_controlnet:
328
+ raise ValueError(
329
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets."
330
+ )
331
+
332
+ for image_ in image:
333
+ self.check_image(image_, prompt, prompt_embeds)
334
+ else:
335
+ assert False
336
+
337
+ # Check `controlnet_conditioning_scale`
338
+ if num_controlnet == 1:
339
+ if not isinstance(controlnet_conditioning_scale, float):
340
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
341
+ elif num_controlnet > 1:
342
+ if isinstance(controlnet_conditioning_scale, list):
343
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
344
+ raise ValueError("A single batch of multiple conditionings is supported at the moment.")
345
+ elif (
346
+ isinstance(controlnet_conditioning_scale, list)
347
+ and len(controlnet_conditioning_scale) != num_controlnet
348
+ ):
349
+ raise ValueError(
350
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
351
+ " the same length as the number of controlnets"
352
+ )
353
+ else:
354
+ assert False
355
+
356
+ if len(control_guidance_start) != len(control_guidance_end):
357
+ raise ValueError(
358
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
359
+ )
360
+
361
+ if num_controlnet > 1:
362
+ if len(control_guidance_start) != num_controlnet:
363
+ raise ValueError(
364
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}."
365
+ )
366
+
367
+ for start, end in zip(control_guidance_start, control_guidance_end):
368
+ if start >= end:
369
+ raise ValueError(
370
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
371
+ )
372
+ if start < 0.0:
373
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
374
+ if end > 1.0:
375
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
376
+
377
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
378
+ def check_image(self, image, prompt, prompt_embeds):
379
+ image_is_pil = isinstance(image, PIL.Image.Image)
380
+ image_is_tensor = isinstance(image, torch.Tensor)
381
+ image_is_np = isinstance(image, np.ndarray)
382
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
383
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
384
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
385
+
386
+ if (
387
+ not image_is_pil
388
+ and not image_is_tensor
389
+ and not image_is_np
390
+ and not image_is_pil_list
391
+ and not image_is_tensor_list
392
+ and not image_is_np_list
393
+ ):
394
+ raise TypeError(
395
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
396
+ )
397
+
398
+ if image_is_pil:
399
+ image_batch_size = 1
400
+ else:
401
+ image_batch_size = len(image)
402
+
403
+ if prompt is not None and isinstance(prompt, str):
404
+ prompt_batch_size = 1
405
+ elif prompt is not None and isinstance(prompt, list):
406
+ prompt_batch_size = len(prompt)
407
+ elif prompt_embeds is not None:
408
+ prompt_batch_size = prompt_embeds.shape[0]
409
+
410
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
411
+ raise ValueError(
412
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
413
+ )
414
+
415
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
416
+ def prepare_control_image(
417
+ self,
418
+ image,
419
+ width,
420
+ height,
421
+ batch_size,
422
+ num_images_per_prompt,
423
+ device,
424
+ dtype,
425
+ do_classifier_free_guidance=False,
426
+ guess_mode=False,
427
+ ):
428
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
429
+ image_batch_size = image.shape[0]
430
+
431
+ if image_batch_size == 1:
432
+ repeat_by = batch_size
433
+ else:
434
+ # image batch size is the same as prompt batch size
435
+ repeat_by = num_images_per_prompt
436
+
437
+ image = image.repeat_interleave(repeat_by, dim=0)
438
+
439
+ image = image.to(device=device, dtype=dtype)
440
+
441
+ if do_classifier_free_guidance and not guess_mode:
442
+ image = torch.cat([image] * 2)
443
+
444
+ return image
445
+
446
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
447
+ def get_timesteps(self, num_inference_steps, strength, device):
448
+ # get the original timestep using init_timestep
449
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
450
+
451
+ t_start = max(num_inference_steps - init_timestep, 0)
452
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
453
+
454
+ return timesteps, num_inference_steps - t_start
455
+
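A worked example of the `strength`-to-timesteps arithmetic in `get_timesteps` above (plain arithmetic, no scheduler object needed):

```python
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
# the img2img run keeps only the last 40 of the 50 scheduled timesteps;
# strength=1.0 would keep all of them, strength=0.0 would keep none.
```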
456
+ def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
457
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
458
+ raise ValueError(
459
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
460
+ )
461
+
462
+ image = image.to(device=device, dtype=dtype)
463
+
464
+ batch_size = batch_size * num_images_per_prompt
465
+
466
+ if image.shape[1] == 4:
467
+ init_latents = image
468
+
469
+ else:
470
+ _image = image.cpu().detach().numpy()
471
+ init_latents = self.vae_encoder(sample=_image)[0]
472
+ init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype)
473
+ init_latents = 0.18215 * init_latents
474
+
475
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
476
+ # expand init_latents for batch_size
477
+ deprecation_message = (
478
+ f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
479
+ " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
480
+ " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
481
+ " your script to pass as many initial images as text prompts to suppress this warning."
482
+ )
483
+ deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
484
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
485
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
486
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
487
+ raise ValueError(
488
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
489
+ )
490
+ else:
491
+ init_latents = torch.cat([init_latents], dim=0)
492
+
493
+ shape = init_latents.shape
494
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
495
+
496
+ # get latents
497
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
498
+ latents = init_latents
499
+
500
+ return latents
501
+
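A standalone sketch of the noising performed at the end of `prepare_latents`, using a real scheduler but a random tensor in place of the VAE-encoded image. The beta values mirror the usual Stable Diffusion configuration and are an assumption here, as is the choice of timestep index:

```python
import torch
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
scheduler.set_timesteps(50)

init_latents = 0.18215 * torch.randn(1, 4, 64, 64)  # stand-in for the scaled vae_encoder output
noise = torch.randn_like(init_latents)
latent_timestep = scheduler.timesteps[10:11]         # first kept step for strength=0.8
noisy_latents = scheduler.add_noise(init_latents, noise, latent_timestep)
```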
502
+ @torch.no_grad()
503
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
504
+ def __call__(
505
+ self,
506
+ num_controlnet: int,
507
+ fp16: bool = True,
508
+ prompt: Union[str, List[str]] = None,
509
+ image: Union[
510
+ torch.FloatTensor,
511
+ PIL.Image.Image,
512
+ np.ndarray,
513
+ List[torch.FloatTensor],
514
+ List[PIL.Image.Image],
515
+ List[np.ndarray],
516
+ ] = None,
517
+ control_image: Union[
518
+ torch.FloatTensor,
519
+ PIL.Image.Image,
520
+ np.ndarray,
521
+ List[torch.FloatTensor],
522
+ List[PIL.Image.Image],
523
+ List[np.ndarray],
524
+ ] = None,
525
+ height: Optional[int] = None,
526
+ width: Optional[int] = None,
527
+ strength: float = 0.8,
528
+ num_inference_steps: int = 50,
529
+ guidance_scale: float = 7.5,
530
+ negative_prompt: Optional[Union[str, List[str]]] = None,
531
+ num_images_per_prompt: Optional[int] = 1,
532
+ eta: float = 0.0,
533
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
534
+ latents: Optional[torch.FloatTensor] = None,
535
+ prompt_embeds: Optional[torch.FloatTensor] = None,
536
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
537
+ output_type: Optional[str] = "pil",
538
+ return_dict: bool = True,
539
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
540
+ callback_steps: int = 1,
541
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
542
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
543
+ guess_mode: bool = False,
544
+ control_guidance_start: Union[float, List[float]] = 0.0,
545
+ control_guidance_end: Union[float, List[float]] = 1.0,
546
+ ):
547
+ r"""
548
+ Function invoked when calling the pipeline for generation.
549
+
550
+ Args:
551
+ prompt (`str` or `List[str]`, *optional*):
552
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
553
+ instead.
554
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
555
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
556
+ The initial image will be used as the starting point for the image generation process. Can also accept
557
+ image latents as `image`; if latents are passed directly, they will not be encoded again.
558
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
559
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
560
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance for the UNet. If
561
+ the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
562
+ also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
563
+ height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
564
+ specified in init, images must be passed as a list such that each element of the list can be correctly
565
+ batched for input to a single controlnet.
566
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
567
+ The height in pixels of the generated image.
568
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
569
+ The width in pixels of the generated image.
570
+ num_inference_steps (`int`, *optional*, defaults to 50):
571
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
572
+ expense of slower inference.
573
+ guidance_scale (`float`, *optional*, defaults to 7.5):
574
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
575
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
576
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
577
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
578
+ usually at the expense of lower image quality.
579
+ negative_prompt (`str` or `List[str]`, *optional*):
580
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
581
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
582
+ less than `1`).
583
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
584
+ The number of images to generate per prompt.
585
+ eta (`float`, *optional*, defaults to 0.0):
586
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
587
+ [`schedulers.DDIMScheduler`], will be ignored for others.
588
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
589
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
590
+ to make generation deterministic.
591
+ latents (`torch.FloatTensor`, *optional*):
592
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
593
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
594
+ tensor will be generated by sampling using the supplied random `generator`.
595
+ prompt_embeds (`torch.FloatTensor`, *optional*):
596
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
597
+ provided, text embeddings will be generated from `prompt` input argument.
598
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
599
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
600
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
601
+ argument.
602
+ output_type (`str`, *optional*, defaults to `"pil"`):
603
+ The output format of the generated image. Choose between
604
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
605
+ return_dict (`bool`, *optional*, defaults to `True`):
606
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
607
+ plain tuple.
608
+ callback (`Callable`, *optional*):
609
+ A function that will be called every `callback_steps` steps during inference. The function will be
610
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
611
+ callback_steps (`int`, *optional*, defaults to 1):
612
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
613
+ called at every step.
614
+ cross_attention_kwargs (`dict`, *optional*):
615
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
616
+ `self.processor` in
617
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
618
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
619
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
620
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
621
+ corresponding scale as a list. Note that by default, a smaller conditioning scale is used for img2img
622
+ than for [`~StableDiffusionControlNetPipeline.__call__`].
623
+ guess_mode (`bool`, *optional*, defaults to `False`):
624
+ In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
625
+ you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
626
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
627
+ The percentage of total steps at which the controlnet starts applying.
628
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
629
+ The percentage of total steps at which the controlnet stops applying.
630
+
631
+ Examples:
632
+
633
+ Returns:
634
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
635
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
636
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
637
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
638
+ (nsfw) content, according to the `safety_checker`.
639
+ """
640
+ if fp16:
641
+ torch_dtype = torch.float16
642
+ np_dtype = np.float16
643
+ else:
644
+ torch_dtype = torch.float32
645
+ np_dtype = np.float32
646
+
647
+ # align format for control guidance
648
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
649
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
650
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
651
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
652
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
653
+ mult = num_controlnet
654
+ control_guidance_start, control_guidance_end = (
655
+ mult * [control_guidance_start],
656
+ mult * [control_guidance_end],
657
+ )
658
+
659
+ # 1. Check inputs. Raise error if not correct
660
+ self.check_inputs(
661
+ num_controlnet,
662
+ prompt,
663
+ control_image,
664
+ callback_steps,
665
+ negative_prompt,
666
+ prompt_embeds,
667
+ negative_prompt_embeds,
668
+ controlnet_conditioning_scale,
669
+ control_guidance_start,
670
+ control_guidance_end,
671
+ )
672
+
673
+ # 2. Define call parameters
674
+ if prompt is not None and isinstance(prompt, str):
675
+ batch_size = 1
676
+ elif prompt is not None and isinstance(prompt, list):
677
+ batch_size = len(prompt)
678
+ else:
679
+ batch_size = prompt_embeds.shape[0]
680
+
681
+ device = self._execution_device
682
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
683
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
684
+ # corresponds to doing no classifier free guidance.
685
+ do_classifier_free_guidance = guidance_scale > 1.0
686
+
687
+ if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float):
688
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet
689
+
690
+ # 3. Encode input prompt
691
+ prompt_embeds = self._encode_prompt(
692
+ prompt,
693
+ num_images_per_prompt,
694
+ do_classifier_free_guidance,
695
+ negative_prompt,
696
+ prompt_embeds=prompt_embeds,
697
+ negative_prompt_embeds=negative_prompt_embeds,
698
+ )
699
+ # 4. Prepare image
700
+ image = self.image_processor.preprocess(image).to(dtype=torch.float32)
701
+
702
+ # 5. Prepare controlnet_conditioning_image
703
+ if num_controlnet == 1:
704
+ control_image = self.prepare_control_image(
705
+ image=control_image,
706
+ width=width,
707
+ height=height,
708
+ batch_size=batch_size * num_images_per_prompt,
709
+ num_images_per_prompt=num_images_per_prompt,
710
+ device=device,
711
+ dtype=torch_dtype,
712
+ do_classifier_free_guidance=do_classifier_free_guidance,
713
+ guess_mode=guess_mode,
714
+ )
715
+ elif num_controlnet > 1:
716
+ control_images = []
717
+
718
+ for control_image_ in control_image:
719
+ control_image_ = self.prepare_control_image(
720
+ image=control_image_,
721
+ width=width,
722
+ height=height,
723
+ batch_size=batch_size * num_images_per_prompt,
724
+ num_images_per_prompt=num_images_per_prompt,
725
+ device=device,
726
+ dtype=torch_dtype,
727
+ do_classifier_free_guidance=do_classifier_free_guidance,
728
+ guess_mode=guess_mode,
729
+ )
730
+
731
+ control_images.append(control_image_)
732
+
733
+ control_image = control_images
734
+ else:
735
+ assert False
736
+
737
+ # 5. Prepare timesteps
738
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
739
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
740
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
741
+
742
+ # 6. Prepare latent variables
743
+ latents = self.prepare_latents(
744
+ image,
745
+ latent_timestep,
746
+ batch_size,
747
+ num_images_per_prompt,
748
+ torch_dtype,
749
+ device,
750
+ generator,
751
+ )
752
+
753
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
754
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
755
+
756
+ # 7.1 Create tensor stating which controlnets to keep
757
+ controlnet_keep = []
758
+ for i in range(len(timesteps)):
759
+ keeps = [
760
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
761
+ for s, e in zip(control_guidance_start, control_guidance_end)
762
+ ]
763
+ controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps)
764
+
765
+ # 9. Denoising loop
766
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
767
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
768
+ for i, t in enumerate(timesteps):
769
+ # expand the latents if we are doing classifier free guidance
770
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
771
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
772
+
773
+ if isinstance(controlnet_keep[i], list):
774
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
775
+ else:
776
+ controlnet_cond_scale = controlnet_conditioning_scale
777
+ if isinstance(controlnet_cond_scale, list):
778
+ controlnet_cond_scale = controlnet_cond_scale[0]
779
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
780
+
781
+ # predict the noise residual
782
+ _latent_model_input = latent_model_input.cpu().detach().numpy()
783
+ _prompt_embeds = np.array(prompt_embeds, dtype=np_dtype)
784
+ _t = np.array([t.cpu().detach().numpy()], dtype=np_dtype)
785
+
786
+ if num_controlnet == 1:
787
+ control_images = np.array([control_image], dtype=np_dtype)
788
+ else:
789
+ control_images = []
790
+ for _control_img in control_image:
791
+ _control_img = _control_img.cpu().detach().numpy()
792
+ control_images.append(_control_img)
793
+ control_images = np.array(control_images, dtype=np_dtype)
794
+
795
+ control_scales = np.array(cond_scale, dtype=np_dtype)
796
+ control_scales = np.resize(control_scales, (num_controlnet, 1))
797
+
798
+ noise_pred = self.unet(
799
+ sample=_latent_model_input,
800
+ timestep=_t,
801
+ encoder_hidden_states=_prompt_embeds,
802
+ controlnet_conds=control_images,
803
+ conditioning_scales=control_scales,
804
+ )[0]
805
+ noise_pred = torch.from_numpy(noise_pred).to(device)
806
+
807
+ # perform guidance
808
+ if do_classifier_free_guidance:
809
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
810
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
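+ # Standard classifier-free guidance: move from the unconditional prediction toward the
+ # text-conditioned prediction, scaled by `guidance_scale`.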
811
+
812
+ # compute the previous noisy sample x_t -> x_t-1
813
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
814
+
815
+ # call the callback, if provided
816
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
817
+ progress_bar.update()
818
+ if callback is not None and i % callback_steps == 0:
819
+ step_idx = i // getattr(self.scheduler, "order", 1)
820
+ callback(step_idx, t, latents)
821
+
822
+ if not output_type == "latent":
823
+ _latents = latents.cpu().detach().numpy() / 0.18215
824
+ _latents = np.array(_latents, dtype=np_dtype)
825
+ image = self.vae_decoder(latent_sample=_latents)[0]
826
+ image = torch.from_numpy(image).to(device, dtype=torch.float32)
827
+ has_nsfw_concept = None
828
+ else:
829
+ image = latents
830
+ has_nsfw_concept = None
831
+
832
+ if has_nsfw_concept is None:
833
+ do_denormalize = [True] * image.shape[0]
834
+ else:
835
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
836
+
837
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
838
+
839
+ if not return_dict:
840
+ return (image, has_nsfw_concept)
841
+
842
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
843
+
844
+
845
+ if __name__ == "__main__":
846
+ parser = argparse.ArgumentParser()
847
+
848
+ parser.add_argument(
849
+ "--sd_model",
850
+ type=str,
851
+ required=True,
852
+ help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
853
+ )
854
+
855
+ parser.add_argument(
856
+ "--onnx_model_dir",
857
+ type=str,
858
+ required=True,
859
+ help="Path to the ONNX directory",
860
+ )
861
+
862
+ parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image")
863
+
864
+ args = parser.parse_args()
865
+
866
+ qr_image = Image.open(args.qr_img_path)
867
+ qr_image = qr_image.resize((512, 512))
868
+
869
+ # init stable diffusion pipeline
870
+ pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model)
871
+ pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
872
+
873
+ provider = ["CUDAExecutionProvider", "CPUExecutionProvider"]
874
+ onnx_pipeline = OnnxStableDiffusionControlNetImg2ImgPipeline(
875
+ vae_encoder=OnnxRuntimeModel.from_pretrained(
876
+ os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider
877
+ ),
878
+ vae_decoder=OnnxRuntimeModel.from_pretrained(
879
+ os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider
880
+ ),
881
+ text_encoder=OnnxRuntimeModel.from_pretrained(
882
+ os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider
883
+ ),
884
+ tokenizer=pipeline.tokenizer,
885
+ unet=OnnxRuntimeModel.from_pretrained(os.path.join(args.onnx_model_dir, "unet"), provider=provider),
886
+ scheduler=pipeline.scheduler,
887
+ )
888
+ onnx_pipeline = onnx_pipeline.to("cuda")
889
+
890
+ prompt = "a cute cat fly to the moon"
891
+ negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect"
892
+
893
+ for i in range(10):
894
+ start_time = time.time()
895
+ image = onnx_pipeline(
896
+ num_controlnet=2,
897
+ prompt=prompt,
898
+ negative_prompt=negative_prompt,
899
+ image=qr_image,
900
+ control_image=[qr_image, qr_image],
901
+ width=512,
902
+ height=512,
903
+ strength=0.75,
904
+ num_inference_steps=20,
905
+ num_images_per_prompt=1,
906
+ controlnet_conditioning_scale=[0.8, 0.8],
907
+ control_guidance_start=[0.3, 0.3],
908
+ control_guidance_end=[0.9, 0.9],
909
+ ).images[0]
910
+ print(time.time() - start_time)
911
+ image.save("output_qr_code.png")
v0.24.0/run_tensorrt_controlnet.py ADDED
@@ -0,0 +1,1022 @@
1
+ import argparse
2
+ import atexit
3
+ import inspect
4
+ import os
5
+ import time
6
+ import warnings
7
+ from typing import Any, Callable, Dict, List, Optional, Union
8
+
9
+ import numpy as np
10
+ import PIL.Image
11
+ import pycuda.driver as cuda
12
+ import tensorrt as trt
13
+ import torch
14
+ from PIL import Image
15
+ from pycuda.tools import make_default_context
16
+ from transformers import CLIPTokenizer
17
+
18
+ from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler
19
+ from diffusers.image_processor import VaeImageProcessor
20
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
21
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
22
+ from diffusers.schedulers import KarrasDiffusionSchedulers
23
+ from diffusers.utils import (
24
+ deprecate,
25
+ logging,
26
+ replace_example_docstring,
27
+ )
28
+ from diffusers.utils.torch_utils import randn_tensor
29
+
30
+
31
+ # Initialize CUDA
32
+ cuda.init()
33
+ context = make_default_context()
34
+ device = context.get_device()
35
+ atexit.register(context.pop)
36
+
37
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
38
+
39
+
40
+ def load_engine(trt_runtime, engine_path):
41
+ with open(engine_path, "rb") as f:
42
+ engine_data = f.read()
43
+ engine = trt_runtime.deserialize_cuda_engine(engine_data)
44
+ return engine
45
+
46
+
47
+ class TensorRTModel:
48
+ def __init__(
49
+ self,
50
+ trt_engine_path,
51
+ **kwargs,
52
+ ):
53
+ cuda.init()
54
+ stream = cuda.Stream()
55
+ TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
56
+ trt.init_libnvinfer_plugins(TRT_LOGGER, "")
57
+ trt_runtime = trt.Runtime(TRT_LOGGER)
58
+ engine = load_engine(trt_runtime, trt_engine_path)
59
+ context = engine.create_execution_context()
60
+
61
+ # allocates memory for network inputs/outputs on both CPU and GPU
62
+ host_inputs = []
63
+ cuda_inputs = []
64
+ host_outputs = []
65
+ cuda_outputs = []
66
+ bindings = []
67
+ input_names = []
68
+ output_names = []
69
+
70
+ for binding in engine:
71
+ datatype = engine.get_binding_dtype(binding)
72
+ if datatype == trt.DataType.HALF:
73
+ dtype = np.float16
74
+ else:
75
+ dtype = np.float32
76
+
77
+ shape = tuple(engine.get_binding_shape(binding))
78
+ host_mem = cuda.pagelocked_empty(shape, dtype)
79
+ cuda_mem = cuda.mem_alloc(host_mem.nbytes)
80
+ bindings.append(int(cuda_mem))
81
+
82
+ if engine.binding_is_input(binding):
83
+ host_inputs.append(host_mem)
84
+ cuda_inputs.append(cuda_mem)
85
+ input_names.append(binding)
86
+ else:
87
+ host_outputs.append(host_mem)
88
+ cuda_outputs.append(cuda_mem)
89
+ output_names.append(binding)
90
+
91
+ self.stream = stream
92
+ self.context = context
93
+ self.engine = engine
94
+
95
+ self.host_inputs = host_inputs
96
+ self.cuda_inputs = cuda_inputs
97
+ self.host_outputs = host_outputs
98
+ self.cuda_outputs = cuda_outputs
99
+ self.bindings = bindings
100
+ self.batch_size = engine.max_batch_size
101
+
102
+ self.input_names = input_names
103
+ self.output_names = output_names
104
+
105
+ def __call__(self, **kwargs):
106
+ context = self.context
107
+ stream = self.stream
108
+ bindings = self.bindings
109
+
110
+ host_inputs = self.host_inputs
111
+ cuda_inputs = self.cuda_inputs
112
+ host_outputs = self.host_outputs
113
+ cuda_outputs = self.cuda_outputs
114
+
115
+ for idx, input_name in enumerate(self.input_names):
116
+ _input = kwargs[input_name]
117
+ np.copyto(host_inputs[idx], _input)
118
+ # transfer input data to the GPU
119
+ cuda.memcpy_htod_async(cuda_inputs[idx], host_inputs[idx], stream)
120
+
121
+ context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
122
+
123
+ result = {}
124
+ for idx, output_name in enumerate(self.output_names):
125
+ # transfer predictions back from the GPU
126
+ cuda.memcpy_dtoh_async(host_outputs[idx], cuda_outputs[idx], stream)
127
+ result[output_name] = host_outputs[idx]
128
+
129
+ stream.synchronize()
130
+
131
+ return result
132
+
133
+
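+ # Usage sketch (illustrative, names are placeholders): the wrapper is called with keyword
+ # arguments named after the engine's input bindings, e.g.
+ # `TensorRTModel("unet.engine")(sample=..., timestep=..., encoder_hidden_states=..., ...)`,
+ # and returns a dict keyed by the engine's output binding names. The exact binding names
+ # depend on how the engine was exported.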
134
+ EXAMPLE_DOC_STRING = """
135
+ Examples:
136
+ ```py
137
+ >>> # !pip install opencv-python transformers accelerate
138
+ >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
139
+ >>> from diffusers.utils import load_image
140
+ >>> import numpy as np
141
+ >>> import torch
142
+
143
+ >>> import cv2
144
+ >>> from PIL import Image
145
+
146
+ >>> # download an image
147
+ >>> image = load_image(
148
+ ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
149
+ ... )
150
+ >>> np_image = np.array(image)
151
+
152
+ >>> # get canny image
153
+ >>> np_image = cv2.Canny(np_image, 100, 200)
154
+ >>> np_image = np_image[:, :, None]
155
+ >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)
156
+ >>> canny_image = Image.fromarray(np_image)
157
+
158
+ >>> # load control net and stable diffusion v1-5
159
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
160
+ >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
161
+ ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
162
+ ... )
163
+
164
+ >>> # speed up diffusion process with faster scheduler and memory optimization
165
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
166
+ >>> pipe.enable_model_cpu_offload()
167
+
168
+ >>> # generate image
169
+ >>> generator = torch.manual_seed(0)
170
+ >>> image = pipe(
171
+ ... "futuristic-looking woman",
172
+ ... num_inference_steps=20,
173
+ ... generator=generator,
174
+ ... image=image,
175
+ ... control_image=canny_image,
176
+ ... ).images[0]
177
+ ```
178
+ """
179
+
180
+
181
+ def prepare_image(image):
182
+ if isinstance(image, torch.Tensor):
183
+ # Batch single image
184
+ if image.ndim == 3:
185
+ image = image.unsqueeze(0)
186
+
187
+ image = image.to(dtype=torch.float32)
188
+ else:
189
+ # preprocess image
190
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
191
+ image = [image]
192
+
193
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
194
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
195
+ image = np.concatenate(image, axis=0)
196
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
197
+ image = np.concatenate([i[None, :] for i in image], axis=0)
198
+
199
+ image = image.transpose(0, 3, 1, 2)
200
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
201
+
202
+ return image
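+ # For PIL/numpy inputs the returned tensor is float32 NCHW with pixel values scaled to [-1, 1];
+ # torch tensors are only batched and cast to float32.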
203
+
204
+
205
+ class TensorRTStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
206
+ vae_encoder: OnnxRuntimeModel
207
+ vae_decoder: OnnxRuntimeModel
208
+ text_encoder: OnnxRuntimeModel
209
+ tokenizer: CLIPTokenizer
210
+ unet: TensorRTModel
211
+ scheduler: KarrasDiffusionSchedulers
212
+
213
+ def __init__(
214
+ self,
215
+ vae_encoder: OnnxRuntimeModel,
216
+ vae_decoder: OnnxRuntimeModel,
217
+ text_encoder: OnnxRuntimeModel,
218
+ tokenizer: CLIPTokenizer,
219
+ unet: TensorRTModel,
220
+ scheduler: KarrasDiffusionSchedulers,
221
+ ):
222
+ super().__init__()
223
+
224
+ self.register_modules(
225
+ vae_encoder=vae_encoder,
226
+ vae_decoder=vae_decoder,
227
+ text_encoder=text_encoder,
228
+ tokenizer=tokenizer,
229
+ unet=unet,
230
+ scheduler=scheduler,
231
+ )
232
+ self.vae_scale_factor = 2 ** (4 - 1)
233
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
234
+ self.control_image_processor = VaeImageProcessor(
235
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
236
+ )
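+ # The VAE scale factor is hard-coded to 8 (2 ** (4 - 1)), matching the downsampling of the
+ # standard Stable Diffusion VAE, rather than being read from a model config.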
237
+
238
+ def _encode_prompt(
239
+ self,
240
+ prompt: Union[str, List[str]],
241
+ num_images_per_prompt: Optional[int],
242
+ do_classifier_free_guidance: bool,
243
+ negative_prompt: Optional[str],
244
+ prompt_embeds: Optional[np.ndarray] = None,
245
+ negative_prompt_embeds: Optional[np.ndarray] = None,
246
+ ):
247
+ r"""
248
+ Encodes the prompt into text encoder hidden states.
249
+
250
+ Args:
251
+ prompt (`str` or `List[str]`):
252
+ prompt to be encoded
253
+ num_images_per_prompt (`int`):
254
+ number of images that should be generated per prompt
255
+ do_classifier_free_guidance (`bool`):
256
+ whether to use classifier free guidance or not
257
+ negative_prompt (`str` or `List[str]`):
258
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
259
+ if `guidance_scale` is less than `1`).
260
+ prompt_embeds (`np.ndarray`, *optional*):
261
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
262
+ provided, text embeddings will be generated from `prompt` input argument.
263
+ negative_prompt_embeds (`np.ndarray`, *optional*):
264
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
265
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
266
+ argument.
267
+ """
268
+ if prompt is not None and isinstance(prompt, str):
269
+ batch_size = 1
270
+ elif prompt is not None and isinstance(prompt, list):
271
+ batch_size = len(prompt)
272
+ else:
273
+ batch_size = prompt_embeds.shape[0]
274
+
275
+ if prompt_embeds is None:
276
+ # get prompt text embeddings
277
+ text_inputs = self.tokenizer(
278
+ prompt,
279
+ padding="max_length",
280
+ max_length=self.tokenizer.model_max_length,
281
+ truncation=True,
282
+ return_tensors="np",
283
+ )
284
+ text_input_ids = text_inputs.input_ids
285
+ untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
286
+
287
+ if not np.array_equal(text_input_ids, untruncated_ids):
288
+ removed_text = self.tokenizer.batch_decode(
289
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
290
+ )
291
+ logger.warning(
292
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
293
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
294
+ )
295
+
296
+ prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
297
+
298
+ prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
299
+
300
+ # get unconditional embeddings for classifier free guidance
301
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
302
+ uncond_tokens: List[str]
303
+ if negative_prompt is None:
304
+ uncond_tokens = [""] * batch_size
305
+ elif type(prompt) is not type(negative_prompt):
306
+ raise TypeError(
307
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
308
+ f" {type(prompt)}."
309
+ )
310
+ elif isinstance(negative_prompt, str):
311
+ uncond_tokens = [negative_prompt] * batch_size
312
+ elif batch_size != len(negative_prompt):
313
+ raise ValueError(
314
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
315
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
316
+ " the batch size of `prompt`."
317
+ )
318
+ else:
319
+ uncond_tokens = negative_prompt
320
+
321
+ max_length = prompt_embeds.shape[1]
322
+ uncond_input = self.tokenizer(
323
+ uncond_tokens,
324
+ padding="max_length",
325
+ max_length=max_length,
326
+ truncation=True,
327
+ return_tensors="np",
328
+ )
329
+ negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
330
+
331
+ if do_classifier_free_guidance:
332
+ negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
333
+
334
+ # For classifier free guidance, we need to do two forward passes.
335
+ # Here we concatenate the unconditional and text embeddings into a single batch
336
+ # to avoid doing two forward passes
337
+ prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
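+ # The concatenated batch puts the unconditional (negative) embeddings first, so a single
+ # forward pass yields both halves needed for classifier-free guidance.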
338
+
339
+ return prompt_embeds
340
+
341
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
342
+ def decode_latents(self, latents):
343
+ warnings.warn(
344
+ "The decode_latents method is deprecated and will be removed in a future version. Please"
345
+ " use VaeImageProcessor instead",
346
+ FutureWarning,
347
+ )
348
+ latents = 1 / self.vae.config.scaling_factor * latents
349
+ image = self.vae.decode(latents, return_dict=False)[0]
350
+ image = (image / 2 + 0.5).clamp(0, 1)
351
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
352
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
353
+ return image
354
+
355
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
356
+ def prepare_extra_step_kwargs(self, generator, eta):
357
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
358
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
359
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
360
+ # and should be between [0, 1]
361
+
362
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
363
+ extra_step_kwargs = {}
364
+ if accepts_eta:
365
+ extra_step_kwargs["eta"] = eta
366
+
367
+ # check if the scheduler accepts generator
368
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
369
+ if accepts_generator:
370
+ extra_step_kwargs["generator"] = generator
371
+ return extra_step_kwargs
372
+
373
+ def check_inputs(
374
+ self,
375
+ num_controlnet,
376
+ prompt,
377
+ image,
378
+ callback_steps,
379
+ negative_prompt=None,
380
+ prompt_embeds=None,
381
+ negative_prompt_embeds=None,
382
+ controlnet_conditioning_scale=1.0,
383
+ control_guidance_start=0.0,
384
+ control_guidance_end=1.0,
385
+ ):
386
+ if (callback_steps is None) or (
387
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
388
+ ):
389
+ raise ValueError(
390
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
391
+ f" {type(callback_steps)}."
392
+ )
393
+
394
+ if prompt is not None and prompt_embeds is not None:
395
+ raise ValueError(
396
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
397
+ " only forward one of the two."
398
+ )
399
+ elif prompt is None and prompt_embeds is None:
400
+ raise ValueError(
401
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
402
+ )
403
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
404
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
405
+
406
+ if negative_prompt is not None and negative_prompt_embeds is not None:
407
+ raise ValueError(
408
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
409
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
410
+ )
411
+
412
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
413
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
414
+ raise ValueError(
415
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
416
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
417
+ f" {negative_prompt_embeds.shape}."
418
+ )
419
+
420
+ # Check `image`
421
+ if num_controlnet == 1:
422
+ self.check_image(image, prompt, prompt_embeds)
423
+ elif num_controlnet > 1:
424
+ if not isinstance(image, list):
425
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
426
+
427
+ # When `image` is a nested list:
428
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
429
+ elif any(isinstance(i, list) for i in image):
430
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
431
+ elif len(image) != num_controlnet:
432
+ raise ValueError(
433
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets."
434
+ )
435
+
436
+ for image_ in image:
437
+ self.check_image(image_, prompt, prompt_embeds)
438
+ else:
439
+ assert False
440
+
441
+ # Check `controlnet_conditioning_scale`
442
+ if num_controlnet == 1:
443
+ if not isinstance(controlnet_conditioning_scale, float):
444
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
445
+ elif num_controlnet > 1:
446
+ if isinstance(controlnet_conditioning_scale, list):
447
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
448
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
449
+ elif (
450
+ isinstance(controlnet_conditioning_scale, list)
451
+ and len(controlnet_conditioning_scale) != num_controlnet
452
+ ):
453
+ raise ValueError(
454
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
455
+ " the same length as the number of controlnets"
456
+ )
457
+ else:
458
+ assert False
459
+
460
+ if len(control_guidance_start) != len(control_guidance_end):
461
+ raise ValueError(
462
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
463
+ )
464
+
465
+ if num_controlnet > 1:
466
+ if len(control_guidance_start) != num_controlnet:
467
+ raise ValueError(
468
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}."
469
+ )
470
+
471
+ for start, end in zip(control_guidance_start, control_guidance_end):
472
+ if start >= end:
473
+ raise ValueError(
474
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
475
+ )
476
+ if start < 0.0:
477
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
478
+ if end > 1.0:
479
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
480
+
481
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
482
+ def check_image(self, image, prompt, prompt_embeds):
483
+ image_is_pil = isinstance(image, PIL.Image.Image)
484
+ image_is_tensor = isinstance(image, torch.Tensor)
485
+ image_is_np = isinstance(image, np.ndarray)
486
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
487
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
488
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
489
+
490
+ if (
491
+ not image_is_pil
492
+ and not image_is_tensor
493
+ and not image_is_np
494
+ and not image_is_pil_list
495
+ and not image_is_tensor_list
496
+ and not image_is_np_list
497
+ ):
498
+ raise TypeError(
499
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
500
+ )
501
+
502
+ if image_is_pil:
503
+ image_batch_size = 1
504
+ else:
505
+ image_batch_size = len(image)
506
+
507
+ if prompt is not None and isinstance(prompt, str):
508
+ prompt_batch_size = 1
509
+ elif prompt is not None and isinstance(prompt, list):
510
+ prompt_batch_size = len(prompt)
511
+ elif prompt_embeds is not None:
512
+ prompt_batch_size = prompt_embeds.shape[0]
513
+
514
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
515
+ raise ValueError(
516
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
517
+ )
518
+
519
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
520
+ def prepare_control_image(
521
+ self,
522
+ image,
523
+ width,
524
+ height,
525
+ batch_size,
526
+ num_images_per_prompt,
527
+ device,
528
+ dtype,
529
+ do_classifier_free_guidance=False,
530
+ guess_mode=False,
531
+ ):
532
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
533
+ image_batch_size = image.shape[0]
534
+
535
+ if image_batch_size == 1:
536
+ repeat_by = batch_size
537
+ else:
538
+ # image batch size is the same as prompt batch size
539
+ repeat_by = num_images_per_prompt
540
+
541
+ image = image.repeat_interleave(repeat_by, dim=0)
542
+
543
+ image = image.to(device=device, dtype=dtype)
544
+
545
+ if do_classifier_free_guidance and not guess_mode:
546
+ image = torch.cat([image] * 2)
547
+
548
+ return image
549
+
550
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
551
+ def get_timesteps(self, num_inference_steps, strength, device):
552
+ # get the original timestep using init_timestep
553
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
554
+
555
+ t_start = max(num_inference_steps - init_timestep, 0)
556
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
557
+
558
+ return timesteps, num_inference_steps - t_start
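+ # Worked example: with num_inference_steps=20 and strength=0.75, init_timestep = 15 and
+ # t_start = 5, so denoising runs over the final 15 of the 20 scheduled timesteps.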
559
+
560
+ def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
561
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
562
+ raise ValueError(
563
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
564
+ )
565
+
566
+ image = image.to(device=device, dtype=dtype)
567
+
568
+ batch_size = batch_size * num_images_per_prompt
569
+
570
+ if image.shape[1] == 4:
571
+ init_latents = image
572
+
573
+ else:
574
+ _image = image.cpu().detach().numpy()
575
+ init_latents = self.vae_encoder(sample=_image)[0]
576
+ init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype)
577
+ init_latents = 0.18215 * init_latents
578
+
579
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
580
+ # expand init_latents for batch_size
581
+ deprecation_message = (
582
+ f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
583
+ " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
584
+ " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
585
+ " your script to pass as many initial images as text prompts to suppress this warning."
586
+ )
587
+ deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
588
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
589
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
590
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
591
+ raise ValueError(
592
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
593
+ )
594
+ else:
595
+ init_latents = torch.cat([init_latents], dim=0)
596
+
597
+ shape = init_latents.shape
598
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
599
+
600
+ # get latents
601
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
602
+ latents = init_latents
603
+
604
+ return latents
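+ # 0.18215 is the Stable Diffusion VAE scaling factor; the same constant is divided out
+ # again before decoding in `__call__`.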
605
+
606
+ @torch.no_grad()
607
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
608
+ def __call__(
609
+ self,
610
+ num_controlnet: int,
611
+ fp16: bool = True,
612
+ prompt: Union[str, List[str]] = None,
613
+ image: Union[
614
+ torch.FloatTensor,
615
+ PIL.Image.Image,
616
+ np.ndarray,
617
+ List[torch.FloatTensor],
618
+ List[PIL.Image.Image],
619
+ List[np.ndarray],
620
+ ] = None,
621
+ control_image: Union[
622
+ torch.FloatTensor,
623
+ PIL.Image.Image,
624
+ np.ndarray,
625
+ List[torch.FloatTensor],
626
+ List[PIL.Image.Image],
627
+ List[np.ndarray],
628
+ ] = None,
629
+ height: Optional[int] = None,
630
+ width: Optional[int] = None,
631
+ strength: float = 0.8,
632
+ num_inference_steps: int = 50,
633
+ guidance_scale: float = 7.5,
634
+ negative_prompt: Optional[Union[str, List[str]]] = None,
635
+ num_images_per_prompt: Optional[int] = 1,
636
+ eta: float = 0.0,
637
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
638
+ latents: Optional[torch.FloatTensor] = None,
639
+ prompt_embeds: Optional[torch.FloatTensor] = None,
640
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
641
+ output_type: Optional[str] = "pil",
642
+ return_dict: bool = True,
643
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
644
+ callback_steps: int = 1,
645
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
646
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
647
+ guess_mode: bool = False,
648
+ control_guidance_start: Union[float, List[float]] = 0.0,
649
+ control_guidance_end: Union[float, List[float]] = 1.0,
650
+ ):
651
+ r"""
652
+ Function invoked when calling the pipeline for generation.
653
+
654
+ Args:
655
+ prompt (`str` or `List[str]`, *optional*):
656
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
657
+ instead.
658
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
659
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
660
+ The initial image will be used as the starting point for the image generation process. Can also accept
661
+ image latents as `image`; if latents are passed directly, they will not be encoded again.
662
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
663
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
664
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance for the UNet. If
665
+ the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
666
+ also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
667
+ height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
668
+ specified in init, images must be passed as a list such that each element of the list can be correctly
669
+ batched for input to a single controlnet.
670
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
671
+ The height in pixels of the generated image.
672
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
673
+ The width in pixels of the generated image.
674
+ num_inference_steps (`int`, *optional*, defaults to 50):
675
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
676
+ expense of slower inference.
677
+ guidance_scale (`float`, *optional*, defaults to 7.5):
678
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
679
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
680
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
681
+ 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
682
+ usually at the expense of lower image quality.
683
+ negative_prompt (`str` or `List[str]`, *optional*):
684
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
685
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
686
+ less than `1`).
687
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
688
+ The number of images to generate per prompt.
689
+ eta (`float`, *optional*, defaults to 0.0):
690
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
691
+ [`schedulers.DDIMScheduler`], will be ignored for others.
692
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
693
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
694
+ to make generation deterministic.
695
+ latents (`torch.FloatTensor`, *optional*):
696
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
697
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
698
+ tensor will be generated by sampling using the supplied random `generator`.
699
+ prompt_embeds (`torch.FloatTensor`, *optional*):
700
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
701
+ provided, text embeddings will be generated from `prompt` input argument.
702
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
703
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
704
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
705
+ argument.
706
+ output_type (`str`, *optional*, defaults to `"pil"`):
707
+ The output format of the generated image. Choose between
708
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
709
+ return_dict (`bool`, *optional*, defaults to `True`):
710
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
711
+ plain tuple.
712
+ callback (`Callable`, *optional*):
713
+ A function that will be called every `callback_steps` steps during inference. The function will be
714
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
715
+ callback_steps (`int`, *optional*, defaults to 1):
716
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
717
+ called at every step.
718
+ cross_attention_kwargs (`dict`, *optional*):
719
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
720
+ `self.processor` in
721
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
722
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
723
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
724
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
725
+ corresponding scale as a list. Note that by default, we use a smaller conditioning scale for image-to-image
726
+ than for [`~StableDiffusionControlNetPipeline.__call__`].
727
+ guess_mode (`bool`, *optional*, defaults to `False`):
728
+ In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
729
+ you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.
730
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
731
+ The percentage of total steps at which the controlnet starts applying.
732
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
733
+ The percentage of total steps at which the controlnet stops applying.
734
+
735
+ Examples:
736
+
737
+ Returns:
738
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
739
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
740
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
741
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
742
+ (nsfw) content, according to the `safety_checker`.
743
+ """
744
+ if fp16:
745
+ torch_dtype = torch.float16
746
+ np_dtype = np.float16
747
+ else:
748
+ torch_dtype = torch.float32
749
+ np_dtype = np.float32
750
+
751
+ # align format for control guidance
752
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
753
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
754
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
755
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
756
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
757
+ mult = num_controlnet
758
+ control_guidance_start, control_guidance_end = (
759
+ mult * [control_guidance_start],
760
+ mult * [control_guidance_end],
761
+ )
762
+
763
+ # 1. Check inputs. Raise error if not correct
764
+ self.check_inputs(
765
+ num_controlnet,
766
+ prompt,
767
+ control_image,
768
+ callback_steps,
769
+ negative_prompt,
770
+ prompt_embeds,
771
+ negative_prompt_embeds,
772
+ controlnet_conditioning_scale,
773
+ control_guidance_start,
774
+ control_guidance_end,
775
+ )
776
+
777
+ # 2. Define call parameters
778
+ if prompt is not None and isinstance(prompt, str):
779
+ batch_size = 1
780
+ elif prompt is not None and isinstance(prompt, list):
781
+ batch_size = len(prompt)
782
+ else:
783
+ batch_size = prompt_embeds.shape[0]
784
+
785
+ device = self._execution_device
786
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
787
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
788
+ # corresponds to doing no classifier free guidance.
789
+ do_classifier_free_guidance = guidance_scale > 1.0
790
+
791
+ if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float):
792
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet
793
+
794
+ # 3. Encode input prompt
795
+ prompt_embeds = self._encode_prompt(
796
+ prompt,
797
+ num_images_per_prompt,
798
+ do_classifier_free_guidance,
799
+ negative_prompt,
800
+ prompt_embeds=prompt_embeds,
801
+ negative_prompt_embeds=negative_prompt_embeds,
802
+ )
803
+ # 4. Prepare image
804
+ image = self.image_processor.preprocess(image).to(dtype=torch.float32)
805
+
806
+ # 5. Prepare controlnet_conditioning_image
807
+ if num_controlnet == 1:
808
+ control_image = self.prepare_control_image(
809
+ image=control_image,
810
+ width=width,
811
+ height=height,
812
+ batch_size=batch_size * num_images_per_prompt,
813
+ num_images_per_prompt=num_images_per_prompt,
814
+ device=device,
815
+ dtype=torch_dtype,
816
+ do_classifier_free_guidance=do_classifier_free_guidance,
817
+ guess_mode=guess_mode,
818
+ )
819
+ elif num_controlnet > 1:
820
+ control_images = []
821
+
822
+ for control_image_ in control_image:
823
+ control_image_ = self.prepare_control_image(
824
+ image=control_image_,
825
+ width=width,
826
+ height=height,
827
+ batch_size=batch_size * num_images_per_prompt,
828
+ num_images_per_prompt=num_images_per_prompt,
829
+ device=device,
830
+ dtype=torch_dtype,
831
+ do_classifier_free_guidance=do_classifier_free_guidance,
832
+ guess_mode=guess_mode,
833
+ )
834
+
835
+ control_images.append(control_image_)
836
+
837
+ control_image = control_images
838
+ else:
839
+ assert False
840
+
841
+ # 6. Prepare timesteps
842
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
843
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
844
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
845
+
846
+ # 7. Prepare latent variables
847
+ latents = self.prepare_latents(
848
+ image,
849
+ latent_timestep,
850
+ batch_size,
851
+ num_images_per_prompt,
852
+ torch_dtype,
853
+ device,
854
+ generator,
855
+ )
856
+
857
+ # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
858
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
859
+
860
+ # 8.1 Create tensor stating which controlnets to keep
861
+ controlnet_keep = []
862
+ for i in range(len(timesteps)):
863
+ keeps = [
864
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
865
+ for s, e in zip(control_guidance_start, control_guidance_end)
866
+ ]
867
+ controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps)
868
+
869
+ # 9. Denoising loop
870
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
871
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
872
+ for i, t in enumerate(timesteps):
873
+ # expand the latents if we are doing classifier free guidance
874
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
875
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
876
+
877
+ if isinstance(controlnet_keep[i], list):
878
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
879
+ else:
880
+ controlnet_cond_scale = controlnet_conditioning_scale
881
+ if isinstance(controlnet_cond_scale, list):
882
+ controlnet_cond_scale = controlnet_cond_scale[0]
883
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
884
+
885
+ # predict the noise residual
886
+ _latent_model_input = latent_model_input.cpu().detach().numpy()
887
+ _prompt_embeds = np.array(prompt_embeds, dtype=np_dtype)
888
+ _t = np.array([t.cpu().detach().numpy()], dtype=np_dtype)
889
+
890
+ if num_controlnet == 1:
891
+ control_images = np.array([control_image], dtype=np_dtype)
892
+ else:
893
+ control_images = []
894
+ for _control_img in control_image:
895
+ _control_img = _control_img.cpu().detach().numpy()
896
+ control_images.append(_control_img)
897
+ control_images = np.array(control_images, dtype=np_dtype)
898
+
899
+ control_scales = np.array(cond_scale, dtype=np_dtype)
900
+ control_scales = np.resize(control_scales, (num_controlnet, 1))
901
+
902
+ noise_pred = self.unet(
903
+ sample=_latent_model_input,
904
+ timestep=_t,
905
+ encoder_hidden_states=_prompt_embeds,
906
+ controlnet_conds=control_images,
907
+ conditioning_scales=control_scales,
908
+ )["noise_pred"]
909
+ noise_pred = torch.from_numpy(noise_pred).to(device)
910
+
911
+ # perform guidance
912
+ if do_classifier_free_guidance:
913
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
914
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
915
+
916
+ # compute the previous noisy sample x_t -> x_t-1
917
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
918
+
919
+ # call the callback, if provided
920
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
921
+ progress_bar.update()
922
+ if callback is not None and i % callback_steps == 0:
923
+ step_idx = i // getattr(self.scheduler, "order", 1)
924
+ callback(step_idx, t, latents)
925
+
926
+ if not output_type == "latent":
927
+ _latents = latents.cpu().detach().numpy() / 0.18215
928
+ _latents = np.array(_latents, dtype=np_dtype)
929
+ image = self.vae_decoder(latent_sample=_latents)[0]
930
+ image = torch.from_numpy(image).to(device, dtype=torch.float32)
931
+ has_nsfw_concept = None
932
+ else:
933
+ image = latents
934
+ has_nsfw_concept = None
935
+
936
+ if has_nsfw_concept is None:
937
+ do_denormalize = [True] * image.shape[0]
938
+ else:
939
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
940
+
941
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
942
+
943
+ if not return_dict:
944
+ return (image, has_nsfw_concept)
945
+
946
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
947
+
948
+
949
+ if __name__ == "__main__":
950
+ parser = argparse.ArgumentParser()
951
+
952
+ parser.add_argument(
953
+ "--sd_model",
954
+ type=str,
955
+ required=True,
956
+ help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
957
+ )
958
+
959
+ parser.add_argument(
960
+ "--onnx_model_dir",
961
+ type=str,
962
+ required=True,
963
+ help="Path to the ONNX directory",
964
+ )
965
+
966
+ parser.add_argument(
967
+ "--unet_engine_path",
968
+ type=str,
969
+ required=True,
970
+ help="Path to the unet + controlnet tensorrt model",
971
+ )
972
+
973
+ parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image")
974
+
975
+ args = parser.parse_args()
976
+
977
+ qr_image = Image.open(args.qr_img_path)
978
+ qr_image = qr_image.resize((512, 512))
979
+
980
+ # init stable diffusion pipeline
981
+ pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model)
982
+ pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
983
+
984
+ provider = ["CUDAExecutionProvider", "CPUExecutionProvider"]
985
+ onnx_pipeline = TensorRTStableDiffusionControlNetImg2ImgPipeline(
986
+ vae_encoder=OnnxRuntimeModel.from_pretrained(
987
+ os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider
988
+ ),
989
+ vae_decoder=OnnxRuntimeModel.from_pretrained(
990
+ os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider
991
+ ),
992
+ text_encoder=OnnxRuntimeModel.from_pretrained(
993
+ os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider
994
+ ),
995
+ tokenizer=pipeline.tokenizer,
996
+ unet=TensorRTModel(args.unet_engine_path),
997
+ scheduler=pipeline.scheduler,
998
+ )
999
+ onnx_pipeline = onnx_pipeline.to("cuda")
1000
+
1001
+ prompt = "a cute cat fly to the moon"
1002
+ negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect"
1003
+
1004
+ for i in range(10):
1005
+ start_time = time.time()
1006
+ image = onnx_pipeline(
1007
+ num_controlnet=2,
1008
+ prompt=prompt,
1009
+ negative_prompt=negative_prompt,
1010
+ image=qr_image,
1011
+ control_image=[qr_image, qr_image],
1012
+ width=512,
1013
+ height=512,
1014
+ strength=0.75,
1015
+ num_inference_steps=20,
1016
+ num_images_per_prompt=1,
1017
+ controlnet_conditioning_scale=[0.8, 0.8],
1018
+ control_guidance_start=[0.3, 0.3],
1019
+ control_guidance_end=[0.9, 0.9],
1020
+ ).images[0]
1021
+ print(time.time() - start_time)
1022
+ image.save("output_qr_code.png")
v0.24.0/sd_text2img_k_diffusion.py ADDED
@@ -0,0 +1,476 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib
16
+ import warnings
17
+ from typing import Callable, List, Optional, Union
18
+
19
+ import torch
20
+ from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser
21
+
22
+ from diffusers import DiffusionPipeline, LMSDiscreteScheduler
23
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
24
+ from diffusers.utils import is_accelerate_available, logging
25
+
26
+
27
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
28
+
29
+
30
+ class ModelWrapper:
31
+ def __init__(self, model, alphas_cumprod):
32
+ self.model = model
33
+ self.alphas_cumprod = alphas_cumprod
34
+
35
+ def apply_model(self, *args, **kwargs):
36
+ if len(args) == 3:
37
+ encoder_hidden_states = args[-1]
38
+ args = args[:2]
39
+ if kwargs.get("cond", None) is not None:
40
+ encoder_hidden_states = kwargs.pop("cond")
41
+ return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample
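+ # k-diffusion may pass the text conditioning either as a third positional argument or as
+ # `cond=...`; both paths are forwarded to the UNet as `encoder_hidden_states`.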
42
+
43
+
44
+ class StableDiffusionPipeline(DiffusionPipeline):
45
+ r"""
46
+ Pipeline for text-to-image generation using Stable Diffusion.
47
+
48
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
49
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
50
+
51
+ Args:
52
+ vae ([`AutoencoderKL`]):
53
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
54
+ text_encoder ([`CLIPTextModel`]):
55
+ Frozen text-encoder. Stable Diffusion uses the text portion of
56
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
57
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
58
+ tokenizer (`CLIPTokenizer`):
59
+ Tokenizer of class
60
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
61
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
62
+ scheduler ([`SchedulerMixin`]):
63
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
64
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
65
+ safety_checker ([`StableDiffusionSafetyChecker`]):
66
+ Classification module that estimates whether generated images could be considered offensive or harmful.
67
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
68
+ feature_extractor ([`CLIPImageProcessor`]):
69
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
70
+ """
71
+
72
+ _optional_components = ["safety_checker", "feature_extractor"]
73
+
74
+ def __init__(
75
+ self,
76
+ vae,
77
+ text_encoder,
78
+ tokenizer,
79
+ unet,
80
+ scheduler,
81
+ safety_checker,
82
+ feature_extractor,
83
+ ):
84
+ super().__init__()
85
+
86
+ if safety_checker is None:
87
+ logger.warning(
88
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
89
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
90
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
91
+ " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling"
92
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
93
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
94
+ )
95
+
96
+ # get correct sigmas from LMS
97
+ scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
98
+ self.register_modules(
99
+ vae=vae,
100
+ text_encoder=text_encoder,
101
+ tokenizer=tokenizer,
102
+ unet=unet,
103
+ scheduler=scheduler,
104
+ safety_checker=safety_checker,
105
+ feature_extractor=feature_extractor,
106
+ )
107
+
108
+ model = ModelWrapper(unet, scheduler.alphas_cumprod)
109
+ if scheduler.config.prediction_type == "v_prediction":
110
+ self.k_diffusion_model = CompVisVDenoiser(model)
111
+ else:
112
+ self.k_diffusion_model = CompVisDenoiser(model)
113
+
114
+ def set_sampler(self, scheduler_type: str):
115
+ warnings.warn("The `set_sampler` method is deprecated, please use `set_scheduler` instead.")
116
+ return self.set_scheduler(scheduler_type)
117
+
118
+ def set_scheduler(self, scheduler_type: str):
119
+ library = importlib.import_module("k_diffusion")
120
+ sampling = getattr(library, "sampling")
121
+ self.sampler = getattr(sampling, scheduler_type)
122
+
123
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
124
+ r"""
125
+ Enable sliced attention computation.
126
+
127
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
128
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
129
+
130
+ Args:
131
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
132
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
133
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
134
+ `attention_head_dim` must be a multiple of `slice_size`.
135
+ """
136
+ if slice_size == "auto":
137
+ # half the attention head size is usually a good trade-off between
138
+ # speed and memory
139
+ slice_size = self.unet.config.attention_head_dim // 2
140
+ self.unet.set_attention_slice(slice_size)
141
+
142
+ def disable_attention_slicing(self):
143
+ r"""
144
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
145
+ back to computing attention in one step.
146
+ """
147
+ # set slice_size = `None` to disable `attention slicing`
148
+ self.enable_attention_slicing(None)
149
+
150
+ def enable_sequential_cpu_offload(self, gpu_id=0):
151
+ r"""
152
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
153
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
154
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
155
+ """
156
+ if is_accelerate_available():
157
+ from accelerate import cpu_offload
158
+ else:
159
+ raise ImportError("Please install accelerate via `pip install accelerate`")
160
+
161
+ device = torch.device(f"cuda:{gpu_id}")
162
+
163
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
164
+ if cpu_offloaded_model is not None:
165
+ cpu_offload(cpu_offloaded_model, device)
166
+
167
+ @property
168
+ def _execution_device(self):
169
+ r"""
170
+ Returns the device on which the pipeline's models will be executed. After calling
171
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
172
+ hooks.
173
+ """
174
+ if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
175
+ return self.device
176
+ for module in self.unet.modules():
177
+ if (
178
+ hasattr(module, "_hf_hook")
179
+ and hasattr(module._hf_hook, "execution_device")
180
+ and module._hf_hook.execution_device is not None
181
+ ):
182
+ return torch.device(module._hf_hook.execution_device)
183
+ return self.device
184
+
185
+ def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
186
+ r"""
187
+ Encodes the prompt into text encoder hidden states.
188
+
189
+ Args:
190
+ prompt (`str` or `list(int)`):
191
+ prompt to be encoded
192
+ device: (`torch.device`):
193
+ torch device
194
+ num_images_per_prompt (`int`):
195
+ number of images that should be generated per prompt
196
+ do_classifier_free_guidance (`bool`):
197
+ whether to use classifier free guidance or not
198
+ negative_prompt (`str` or `List[str]`):
199
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
200
+ if `guidance_scale` is less than `1`).
201
+ """
202
+ batch_size = len(prompt) if isinstance(prompt, list) else 1
203
+
204
+ text_inputs = self.tokenizer(
205
+ prompt,
206
+ padding="max_length",
207
+ max_length=self.tokenizer.model_max_length,
208
+ truncation=True,
209
+ return_tensors="pt",
210
+ )
211
+ text_input_ids = text_inputs.input_ids
212
+ untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids
213
+
214
+ if not torch.equal(text_input_ids, untruncated_ids):
215
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
216
+ logger.warning(
217
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
218
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
219
+ )
220
+
221
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
222
+ attention_mask = text_inputs.attention_mask.to(device)
223
+ else:
224
+ attention_mask = None
225
+
226
+ text_embeddings = self.text_encoder(
227
+ text_input_ids.to(device),
228
+ attention_mask=attention_mask,
229
+ )
230
+ text_embeddings = text_embeddings[0]
231
+
232
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
233
+ bs_embed, seq_len, _ = text_embeddings.shape
234
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
235
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
236
+
237
+ # get unconditional embeddings for classifier free guidance
238
+ if do_classifier_free_guidance:
239
+ uncond_tokens: List[str]
240
+ if negative_prompt is None:
241
+ uncond_tokens = [""] * batch_size
242
+ elif type(prompt) is not type(negative_prompt):
243
+ raise TypeError(
244
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
245
+ f" {type(prompt)}."
246
+ )
247
+ elif isinstance(negative_prompt, str):
248
+ uncond_tokens = [negative_prompt]
249
+ elif batch_size != len(negative_prompt):
250
+ raise ValueError(
251
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
252
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
253
+ " the batch size of `prompt`."
254
+ )
255
+ else:
256
+ uncond_tokens = negative_prompt
257
+
258
+ max_length = text_input_ids.shape[-1]
259
+ uncond_input = self.tokenizer(
260
+ uncond_tokens,
261
+ padding="max_length",
262
+ max_length=max_length,
263
+ truncation=True,
264
+ return_tensors="pt",
265
+ )
266
+
267
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
268
+ attention_mask = uncond_input.attention_mask.to(device)
269
+ else:
270
+ attention_mask = None
271
+
272
+ uncond_embeddings = self.text_encoder(
273
+ uncond_input.input_ids.to(device),
274
+ attention_mask=attention_mask,
275
+ )
276
+ uncond_embeddings = uncond_embeddings[0]
277
+
278
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
279
+ seq_len = uncond_embeddings.shape[1]
280
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
281
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
282
+
283
+ # For classifier free guidance, we need to do two forward passes.
284
+ # Here we concatenate the unconditional and text embeddings into a single batch
285
+ # to avoid doing two forward passes
286
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
287
+
288
+ return text_embeddings
289
+
290
+ def run_safety_checker(self, image, device, dtype):
291
+ if self.safety_checker is not None:
292
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
293
+ image, has_nsfw_concept = self.safety_checker(
294
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
295
+ )
296
+ else:
297
+ has_nsfw_concept = None
298
+ return image, has_nsfw_concept
299
+
300
+ def decode_latents(self, latents):
301
+ latents = 1 / 0.18215 * latents
302
+ image = self.vae.decode(latents).sample
303
+ image = (image / 2 + 0.5).clamp(0, 1)
304
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
305
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
306
+ return image
307
+
308
+ def check_inputs(self, prompt, height, width, callback_steps):
309
+ if not isinstance(prompt, str) and not isinstance(prompt, list):
310
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
311
+
312
+ if height % 8 != 0 or width % 8 != 0:
313
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
314
+
315
+ if (callback_steps is None) or (
316
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
317
+ ):
318
+ raise ValueError(
319
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
320
+ f" {type(callback_steps)}."
321
+ )
322
+
323
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
324
+ shape = (batch_size, num_channels_latents, height // 8, width // 8)
325
+ if latents is None:
326
+ if device.type == "mps":
327
+ # randn does not work reproducibly on mps
328
+ latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
329
+ else:
330
+ latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
331
+ else:
332
+ if latents.shape != shape:
333
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
334
+ latents = latents.to(device)
335
+
336
+ # scale the initial noise by the standard deviation required by the scheduler
337
+ return latents
338
+
339
+ @torch.no_grad()
340
+ def __call__(
341
+ self,
342
+ prompt: Union[str, List[str]],
343
+ height: int = 512,
344
+ width: int = 512,
345
+ num_inference_steps: int = 50,
346
+ guidance_scale: float = 7.5,
347
+ negative_prompt: Optional[Union[str, List[str]]] = None,
348
+ num_images_per_prompt: Optional[int] = 1,
349
+ eta: float = 0.0,
350
+ generator: Optional[torch.Generator] = None,
351
+ latents: Optional[torch.FloatTensor] = None,
352
+ output_type: Optional[str] = "pil",
353
+ return_dict: bool = True,
354
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
355
+ callback_steps: int = 1,
356
+ **kwargs,
357
+ ):
358
+ r"""
359
+ Function invoked when calling the pipeline for generation.
360
+
361
+ Args:
362
+ prompt (`str` or `List[str]`):
363
+ The prompt or prompts to guide the image generation.
364
+ height (`int`, *optional*, defaults to 512):
365
+ The height in pixels of the generated image.
366
+ width (`int`, *optional*, defaults to 512):
367
+ The width in pixels of the generated image.
368
+ num_inference_steps (`int`, *optional*, defaults to 50):
369
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
370
+ expense of slower inference.
371
+ guidance_scale (`float`, *optional*, defaults to 7.5):
372
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
373
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
374
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
375
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
376
+ usually at the expense of lower image quality.
377
+ negative_prompt (`str` or `List[str]`, *optional*):
378
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
379
+ if `guidance_scale` is less than `1`).
380
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
381
+ The number of images to generate per prompt.
382
+ eta (`float`, *optional*, defaults to 0.0):
383
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
384
+ [`schedulers.DDIMScheduler`], will be ignored for others.
385
+ generator (`torch.Generator`, *optional*):
386
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
387
+ deterministic.
388
+ latents (`torch.FloatTensor`, *optional*):
389
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
390
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
391
+ tensor will be generated by sampling using the supplied random `generator`.
392
+ output_type (`str`, *optional*, defaults to `"pil"`):
393
+ The output format of the generated image. Choose between
394
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
395
+ return_dict (`bool`, *optional*, defaults to `True`):
396
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
397
+ plain tuple.
398
+ callback (`Callable`, *optional*):
399
+ A function that will be called every `callback_steps` steps during inference. The function will be
400
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
401
+ callback_steps (`int`, *optional*, defaults to 1):
402
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
403
+ called at every step.
404
+
405
+ Returns:
406
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
407
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
408
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
409
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
410
+ (nsfw) content, according to the `safety_checker`.
411
+ """
412
+
413
+ # 1. Check inputs. Raise error if not correct
414
+ self.check_inputs(prompt, height, width, callback_steps)
415
+
416
+ # 2. Define call parameters
417
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
418
+ device = self._execution_device
419
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
420
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
421
+ # corresponds to doing no classifier free guidance.
422
+ do_classifier_free_guidance = True
423
+ if guidance_scale <= 1.0:
424
+ raise ValueError("This pipeline requires classifier-free guidance, so `guidance_scale` has to be > 1.")
425
+
426
+ # 3. Encode input prompt
427
+ text_embeddings = self._encode_prompt(
428
+ prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
429
+ )
430
+
431
+ # 4. Prepare timesteps
432
+ self.scheduler.set_timesteps(num_inference_steps, device=text_embeddings.device)
433
+ sigmas = self.scheduler.sigmas
434
+ sigmas = sigmas.to(text_embeddings.dtype)
435
+
436
+ # 5. Prepare latent variables
437
+ num_channels_latents = self.unet.config.in_channels
438
+ latents = self.prepare_latents(
439
+ batch_size * num_images_per_prompt,
440
+ num_channels_latents,
441
+ height,
442
+ width,
443
+ text_embeddings.dtype,
444
+ device,
445
+ generator,
446
+ latents,
447
+ )
448
+ latents = latents * sigmas[0]
449
+ self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
450
+ self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device)
451
+
452
+ def model_fn(x, t):
453
+ latent_model_input = torch.cat([x] * 2)
454
+
455
+ noise_pred = self.k_diffusion_model(latent_model_input, t, cond=text_embeddings)
456
+
457
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
458
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
459
+ return noise_pred
460
+
461
+ latents = self.sampler(model_fn, latents, sigmas)
462
+
463
+ # 8. Post-processing
464
+ image = self.decode_latents(latents)
465
+
466
+ # 9. Run safety checker
467
+ image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
468
+
469
+ # 10. Convert to PIL
470
+ if output_type == "pil":
471
+ image = self.numpy_to_pil(image)
472
+
473
+ if not return_dict:
474
+ return (image, has_nsfw_concept)
475
+
476
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
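Note: a minimal usage sketch for the k-diffusion pipeline above, assuming it is loaded as a diffusers community pipeline via `custom_pipeline`; the base checkpoint id and sampler name are illustrative, not prescribed by this file.

import torch
from diffusers import DiffusionPipeline

# Load the community pipeline defined above on top of an illustrative base checkpoint.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="sd_text2img_k_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Any sampler exposed by k_diffusion.sampling can be selected by name, e.g. Heun's method.
pipe.set_scheduler("sample_heun")

# guidance_scale must be > 1; the pipeline raises a ValueError otherwise.
image = pipe("an astronaut riding a horse", guidance_scale=7.5, num_inference_steps=25).images[0]
image.save("astronaut.png")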
v0.24.0/seed_resize_stable_diffusion.py ADDED
@@ -0,0 +1,367 @@
1
+ """
2
+ Modified based on the diffusers library from Hugging Face: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
3
+ """
4
+ import inspect
5
+ from typing import Callable, List, Optional, Union
6
+
7
+ import torch
8
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
9
+
10
+ from diffusers import DiffusionPipeline
11
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
13
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
14
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
15
+ from diffusers.utils import logging
16
+
17
+
18
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
19
+
20
+
21
+ class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
22
+ r"""
23
+ Pipeline for text-to-image generation using Stable Diffusion.
24
+
25
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
26
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
27
+
28
+ Args:
29
+ vae ([`AutoencoderKL`]):
30
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
31
+ text_encoder ([`CLIPTextModel`]):
32
+ Frozen text-encoder. Stable Diffusion uses the text portion of
33
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
34
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
35
+ tokenizer (`CLIPTokenizer`):
36
+ Tokenizer of class
37
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
38
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
39
+ scheduler ([`SchedulerMixin`]):
40
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
41
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
42
+ safety_checker ([`StableDiffusionSafetyChecker`]):
43
+ Classification module that estimates whether generated images could be considered offensive or harmful.
44
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
45
+ feature_extractor ([`CLIPImageProcessor`]):
46
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
47
+ """
48
+
49
+ def __init__(
50
+ self,
51
+ vae: AutoencoderKL,
52
+ text_encoder: CLIPTextModel,
53
+ tokenizer: CLIPTokenizer,
54
+ unet: UNet2DConditionModel,
55
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
56
+ safety_checker: StableDiffusionSafetyChecker,
57
+ feature_extractor: CLIPImageProcessor,
58
+ ):
59
+ super().__init__()
60
+ self.register_modules(
61
+ vae=vae,
62
+ text_encoder=text_encoder,
63
+ tokenizer=tokenizer,
64
+ unet=unet,
65
+ scheduler=scheduler,
66
+ safety_checker=safety_checker,
67
+ feature_extractor=feature_extractor,
68
+ )
69
+
70
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
71
+ r"""
72
+ Enable sliced attention computation.
73
+
74
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
75
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
76
+
77
+ Args:
78
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
79
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
80
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
81
+ `attention_head_dim` must be a multiple of `slice_size`.
82
+ """
83
+ if slice_size == "auto":
84
+ # half the attention head size is usually a good trade-off between
85
+ # speed and memory
86
+ slice_size = self.unet.config.attention_head_dim // 2
87
+ self.unet.set_attention_slice(slice_size)
88
+
89
+ def disable_attention_slicing(self):
90
+ r"""
91
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
92
+ back to computing attention in one step.
93
+ """
94
+ # set slice_size = `None` to disable `attention slicing`
95
+ self.enable_attention_slicing(None)
96
+
97
+ @torch.no_grad()
98
+ def __call__(
99
+ self,
100
+ prompt: Union[str, List[str]],
101
+ height: int = 512,
102
+ width: int = 512,
103
+ num_inference_steps: int = 50,
104
+ guidance_scale: float = 7.5,
105
+ negative_prompt: Optional[Union[str, List[str]]] = None,
106
+ num_images_per_prompt: Optional[int] = 1,
107
+ eta: float = 0.0,
108
+ generator: Optional[torch.Generator] = None,
109
+ latents: Optional[torch.FloatTensor] = None,
110
+ output_type: Optional[str] = "pil",
111
+ return_dict: bool = True,
112
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
113
+ callback_steps: int = 1,
114
+ text_embeddings: Optional[torch.FloatTensor] = None,
115
+ **kwargs,
116
+ ):
117
+ r"""
118
+ Function invoked when calling the pipeline for generation.
119
+
120
+ Args:
121
+ prompt (`str` or `List[str]`):
122
+ The prompt or prompts to guide the image generation.
123
+ height (`int`, *optional*, defaults to 512):
124
+ The height in pixels of the generated image.
125
+ width (`int`, *optional*, defaults to 512):
126
+ The width in pixels of the generated image.
127
+ num_inference_steps (`int`, *optional*, defaults to 50):
128
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
129
+ expense of slower inference.
130
+ guidance_scale (`float`, *optional*, defaults to 7.5):
131
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
132
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
133
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
134
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
135
+ usually at the expense of lower image quality.
136
+ negative_prompt (`str` or `List[str]`, *optional*):
137
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
138
+ if `guidance_scale` is less than `1`).
139
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
140
+ The number of images to generate per prompt.
141
+ eta (`float`, *optional*, defaults to 0.0):
142
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
143
+ [`schedulers.DDIMScheduler`], will be ignored for others.
144
+ generator (`torch.Generator`, *optional*):
145
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
146
+ deterministic.
147
+ latents (`torch.FloatTensor`, *optional*):
148
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
149
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
150
+ tensor will be generated by sampling using the supplied random `generator`.
151
+ output_type (`str`, *optional*, defaults to `"pil"`):
152
+ The output format of the generated image. Choose between
153
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
154
+ return_dict (`bool`, *optional*, defaults to `True`):
155
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
156
+ plain tuple.
157
+ callback (`Callable`, *optional*):
158
+ A function that will be called every `callback_steps` steps during inference. The function will be
159
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
160
+ callback_steps (`int`, *optional*, defaults to 1):
161
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
162
+ called at every step.
163
+
164
+ Returns:
165
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
166
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
167
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
168
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
169
+ (nsfw) content, according to the `safety_checker`.
170
+ """
171
+
172
+ if isinstance(prompt, str):
173
+ batch_size = 1
174
+ elif isinstance(prompt, list):
175
+ batch_size = len(prompt)
176
+ else:
177
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
178
+
179
+ if height % 8 != 0 or width % 8 != 0:
180
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
181
+
182
+ if (callback_steps is None) or (
183
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
184
+ ):
185
+ raise ValueError(
186
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
187
+ f" {type(callback_steps)}."
188
+ )
189
+
190
+ # get prompt text embeddings
191
+ text_inputs = self.tokenizer(
192
+ prompt,
193
+ padding="max_length",
194
+ max_length=self.tokenizer.model_max_length,
195
+ return_tensors="pt",
196
+ )
197
+ text_input_ids = text_inputs.input_ids
198
+
199
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
200
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
201
+ logger.warning(
202
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
203
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
204
+ )
205
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
206
+
207
+ if text_embeddings is None:
208
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
209
+
210
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
211
+ bs_embed, seq_len, _ = text_embeddings.shape
212
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
213
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
214
+
215
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
216
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
217
+ # corresponds to doing no classifier free guidance.
218
+ do_classifier_free_guidance = guidance_scale > 1.0
219
+ # get unconditional embeddings for classifier free guidance
220
+ if do_classifier_free_guidance:
221
+ uncond_tokens: List[str]
222
+ if negative_prompt is None:
223
+ uncond_tokens = [""]
224
+ elif type(prompt) is not type(negative_prompt):
225
+ raise TypeError(
226
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
227
+ f" {type(prompt)}."
228
+ )
229
+ elif isinstance(negative_prompt, str):
230
+ uncond_tokens = [negative_prompt]
231
+ elif batch_size != len(negative_prompt):
232
+ raise ValueError(
233
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
234
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
235
+ " the batch size of `prompt`."
236
+ )
237
+ else:
238
+ uncond_tokens = negative_prompt
239
+
240
+ max_length = text_input_ids.shape[-1]
241
+ uncond_input = self.tokenizer(
242
+ uncond_tokens,
243
+ padding="max_length",
244
+ max_length=max_length,
245
+ truncation=True,
246
+ return_tensors="pt",
247
+ )
248
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
249
+
250
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
251
+ seq_len = uncond_embeddings.shape[1]
252
+ uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
253
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
254
+
255
+ # For classifier free guidance, we need to do two forward passes.
256
+ # Here we concatenate the unconditional and text embeddings into a single batch
257
+ # to avoid doing two forward passes
258
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
259
+
260
+ # get the initial random noise unless the user supplied it
261
+
262
+ # Unlike in other pipelines, latents need to be generated in the target device
263
+ # for 1-to-1 results reproducibility with the CompVis implementation.
264
+ # However this currently doesn't work in `mps`.
265
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
266
+ latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
267
+ latents_dtype = text_embeddings.dtype
268
+ if latents is None:
269
+ if self.device.type == "mps":
270
+ # randn does not exist on mps
271
+ latents_reference = torch.randn(
272
+ latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
273
+ ).to(self.device)
274
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
275
+ self.device
276
+ )
277
+ else:
278
+ latents_reference = torch.randn(
279
+ latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
280
+ )
281
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
282
+ else:
283
+ if latents.shape != latents_shape:
284
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
285
+ latents_reference = torch.randn(latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype)
286
+ latents = latents.to(self.device)
287
+
288
+ # This is the key part of the pipeline where we
289
+ # try to ensure that the generated images w/ the same seed
290
+ # but different sizes actually result in similar images
291
+ dx = (latents_shape[3] - latents_shape_reference[3]) // 2
292
+ dy = (latents_shape[2] - latents_shape_reference[2]) // 2
293
+ w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
294
+ h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
295
+ tx = 0 if dx < 0 else dx
296
+ ty = 0 if dy < 0 else dy
297
+ dx = max(-dx, 0)
298
+ dy = max(-dy, 0)
299
+ # import pdb
300
+ # pdb.set_trace()
301
+ latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
302
+
303
+ # set timesteps
304
+ self.scheduler.set_timesteps(num_inference_steps)
305
+
306
+ # Some schedulers like PNDM have timesteps as arrays
307
+ # It's more optimized to move all timesteps to correct device beforehand
308
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
309
+
310
+ # scale the initial noise by the standard deviation required by the scheduler
311
+ latents = latents * self.scheduler.init_noise_sigma
312
+
313
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
314
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
315
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
316
+ # and should be between [0, 1]
317
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
318
+ extra_step_kwargs = {}
319
+ if accepts_eta:
320
+ extra_step_kwargs["eta"] = eta
321
+
322
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
323
+ # expand the latents if we are doing classifier free guidance
324
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
325
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
326
+
327
+ # predict the noise residual
328
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
329
+
330
+ # perform guidance
331
+ if do_classifier_free_guidance:
332
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
333
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
334
+
335
+ # compute the previous noisy sample x_t -> x_t-1
336
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
337
+
338
+ # call the callback, if provided
339
+ if callback is not None and i % callback_steps == 0:
340
+ step_idx = i // getattr(self.scheduler, "order", 1)
341
+ callback(step_idx, t, latents)
342
+
343
+ latents = 1 / 0.18215 * latents
344
+ image = self.vae.decode(latents).sample
345
+
346
+ image = (image / 2 + 0.5).clamp(0, 1)
347
+
348
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
349
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
350
+
351
+ if self.safety_checker is not None:
352
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
353
+ self.device
354
+ )
355
+ image, has_nsfw_concept = self.safety_checker(
356
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
357
+ )
358
+ else:
359
+ has_nsfw_concept = None
360
+
361
+ if output_type == "pil":
362
+ image = self.numpy_to_pil(image)
363
+
364
+ if not return_dict:
365
+ return (image, has_nsfw_concept)
366
+
367
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
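Note: a minimal usage sketch for the seed-resize pipeline above, assuming it is loaded via `custom_pipeline` and that a CUDA device is available; the checkpoint id is illustrative. The pipeline copies a fixed 64x64 reference noise into the centre of the target-resolution latents, so the same seed produces a similar composition at different output sizes.

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="seed_resize_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

prompt = "a photograph of a lighthouse at sunset"
seed = 1234

# Same seed at the native 512x512 resolution ...
image_square = pipe(prompt, height=512, width=512, generator=torch.Generator("cuda").manual_seed(seed)).images[0]

# ... and at a wider resolution; the composition should stay recognisably similar.
image_wide = pipe(prompt, height=512, width=768, generator=torch.Generator("cuda").manual_seed(seed)).images[0]

image_square.save("lighthouse_512x512.png")
image_wide.save("lighthouse_512x768.png")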
v0.24.0/speech_to_image_diffusion.py ADDED
@@ -0,0 +1,262 @@
1
+ import inspect
2
+ from typing import Callable, List, Optional, Union
3
+
4
+ import torch
5
+ from transformers import (
6
+ CLIPImageProcessor,
7
+ CLIPTextModel,
8
+ CLIPTokenizer,
9
+ WhisperForConditionalGeneration,
10
+ WhisperProcessor,
11
+ )
12
+
13
+ from diffusers import (
14
+ AutoencoderKL,
15
+ DDIMScheduler,
16
+ DiffusionPipeline,
17
+ LMSDiscreteScheduler,
18
+ PNDMScheduler,
19
+ UNet2DConditionModel,
20
+ )
21
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
22
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
23
+ from diffusers.utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
27
+
28
+
29
+ class SpeechToImagePipeline(DiffusionPipeline):
30
+ def __init__(
31
+ self,
32
+ speech_model: WhisperForConditionalGeneration,
33
+ speech_processor: WhisperProcessor,
34
+ vae: AutoencoderKL,
35
+ text_encoder: CLIPTextModel,
36
+ tokenizer: CLIPTokenizer,
37
+ unet: UNet2DConditionModel,
38
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
39
+ safety_checker: StableDiffusionSafetyChecker,
40
+ feature_extractor: CLIPImageProcessor,
41
+ ):
42
+ super().__init__()
43
+
44
+ if safety_checker is None:
45
+ logger.warning(
46
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
47
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
48
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
49
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
50
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
51
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
52
+ )
53
+
54
+ self.register_modules(
55
+ speech_model=speech_model,
56
+ speech_processor=speech_processor,
57
+ vae=vae,
58
+ text_encoder=text_encoder,
59
+ tokenizer=tokenizer,
60
+ unet=unet,
61
+ scheduler=scheduler,
62
+ feature_extractor=feature_extractor,
63
+ )
64
+
65
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
66
+ if slice_size == "auto":
67
+ slice_size = self.unet.config.attention_head_dim // 2
68
+ self.unet.set_attention_slice(slice_size)
69
+
70
+ def disable_attention_slicing(self):
71
+ self.enable_attention_slicing(None)
72
+
73
+ @torch.no_grad()
74
+ def __call__(
75
+ self,
76
+ audio,
77
+ sampling_rate=16_000,
78
+ height: int = 512,
79
+ width: int = 512,
80
+ num_inference_steps: int = 50,
81
+ guidance_scale: float = 7.5,
82
+ negative_prompt: Optional[Union[str, List[str]]] = None,
83
+ num_images_per_prompt: Optional[int] = 1,
84
+ eta: float = 0.0,
85
+ generator: Optional[torch.Generator] = None,
86
+ latents: Optional[torch.FloatTensor] = None,
87
+ output_type: Optional[str] = "pil",
88
+ return_dict: bool = True,
89
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
90
+ callback_steps: int = 1,
91
+ **kwargs,
92
+ ):
93
+ inputs = self.speech_processor.feature_extractor(
94
+ audio, return_tensors="pt", sampling_rate=sampling_rate
95
+ ).input_features.to(self.device)
96
+ predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
97
+
98
+ prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
99
+ 0
100
+ ]
101
+
102
+ if isinstance(prompt, str):
103
+ batch_size = 1
104
+ elif isinstance(prompt, list):
105
+ batch_size = len(prompt)
106
+ else:
107
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
108
+
109
+ if height % 8 != 0 or width % 8 != 0:
110
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
111
+
112
+ if (callback_steps is None) or (
113
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
114
+ ):
115
+ raise ValueError(
116
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
117
+ f" {type(callback_steps)}."
118
+ )
119
+
120
+ # get prompt text embeddings
121
+ text_inputs = self.tokenizer(
122
+ prompt,
123
+ padding="max_length",
124
+ max_length=self.tokenizer.model_max_length,
125
+ return_tensors="pt",
126
+ )
127
+ text_input_ids = text_inputs.input_ids
128
+
129
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
130
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
131
+ logger.warning(
132
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
133
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
134
+ )
135
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
136
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
137
+
138
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
139
+ bs_embed, seq_len, _ = text_embeddings.shape
140
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
141
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
142
+
143
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
144
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
145
+ # corresponds to doing no classifier free guidance.
146
+ do_classifier_free_guidance = guidance_scale > 1.0
147
+ # get unconditional embeddings for classifier free guidance
148
+ if do_classifier_free_guidance:
149
+ uncond_tokens: List[str]
150
+ if negative_prompt is None:
151
+ uncond_tokens = [""] * batch_size
152
+ elif type(prompt) is not type(negative_prompt):
153
+ raise TypeError(
154
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
155
+ f" {type(prompt)}."
156
+ )
157
+ elif isinstance(negative_prompt, str):
158
+ uncond_tokens = [negative_prompt]
159
+ elif batch_size != len(negative_prompt):
160
+ raise ValueError(
161
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
162
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
163
+ " the batch size of `prompt`."
164
+ )
165
+ else:
166
+ uncond_tokens = negative_prompt
167
+
168
+ max_length = text_input_ids.shape[-1]
169
+ uncond_input = self.tokenizer(
170
+ uncond_tokens,
171
+ padding="max_length",
172
+ max_length=max_length,
173
+ truncation=True,
174
+ return_tensors="pt",
175
+ )
176
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
177
+
178
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
179
+ seq_len = uncond_embeddings.shape[1]
180
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
181
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
182
+
183
+ # For classifier free guidance, we need to do two forward passes.
184
+ # Here we concatenate the unconditional and text embeddings into a single batch
185
+ # to avoid doing two forward passes
186
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
187
+
188
+ # get the initial random noise unless the user supplied it
189
+
190
+ # Unlike in other pipelines, latents need to be generated in the target device
191
+ # for 1-to-1 results reproducibility with the CompVis implementation.
192
+ # However this currently doesn't work in `mps`.
193
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
194
+ latents_dtype = text_embeddings.dtype
195
+ if latents is None:
196
+ if self.device.type == "mps":
197
+ # randn does not exist on mps
198
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
199
+ self.device
200
+ )
201
+ else:
202
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
203
+ else:
204
+ if latents.shape != latents_shape:
205
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
206
+ latents = latents.to(self.device)
207
+
208
+ # set timesteps
209
+ self.scheduler.set_timesteps(num_inference_steps)
210
+
211
+ # Some schedulers like PNDM have timesteps as arrays
212
+ # It's more optimized to move all timesteps to correct device beforehand
213
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
214
+
215
+ # scale the initial noise by the standard deviation required by the scheduler
216
+ latents = latents * self.scheduler.init_noise_sigma
217
+
218
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
219
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
220
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
221
+ # and should be between [0, 1]
222
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
223
+ extra_step_kwargs = {}
224
+ if accepts_eta:
225
+ extra_step_kwargs["eta"] = eta
226
+
227
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
228
+ # expand the latents if we are doing classifier free guidance
229
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
230
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
231
+
232
+ # predict the noise residual
233
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
234
+
235
+ # perform guidance
236
+ if do_classifier_free_guidance:
237
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
238
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
239
+
240
+ # compute the previous noisy sample x_t -> x_t-1
241
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
242
+
243
+ # call the callback, if provided
244
+ if callback is not None and i % callback_steps == 0:
245
+ step_idx = i // getattr(self.scheduler, "order", 1)
246
+ callback(step_idx, t, latents)
247
+
248
+ latents = 1 / 0.18215 * latents
249
+ image = self.vae.decode(latents).sample
250
+
251
+ image = (image / 2 + 0.5).clamp(0, 1)
252
+
253
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
254
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
255
+
256
+ if output_type == "pil":
257
+ image = self.numpy_to_pil(image)
258
+
259
+ if not return_dict:
260
+ return image
261
+
262
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
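Note: a minimal usage sketch for the speech-to-image pipeline above, assuming it is loaded via `custom_pipeline` together with a Whisper checkpoint; all model ids and the audio sample are illustrative. The audio is expected as a 1-D waveform, sampled at 16 kHz by default.

import torch
from datasets import load_dataset
from transformers import WhisperForConditionalGeneration, WhisperProcessor
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Illustrative 16 kHz audio clip from a small test dataset.
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = ds[3]["audio"]

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",
    speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
    speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
).to(device)

# Whisper transcribes the audio into a prompt, which then drives Stable Diffusion.
image = pipe(audio_sample["array"], sampling_rate=audio_sample["sampling_rate"]).images[0]
image.save("speech_to_image.png")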
v0.24.0/stable_diffusion_comparison.py ADDED
@@ -0,0 +1,405 @@
1
+ from typing import Any, Callable, Dict, List, Optional, Union
2
+
3
+ import torch
4
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
5
+
6
+ from diffusers import (
7
+ AutoencoderKL,
8
+ DDIMScheduler,
9
+ DiffusionPipeline,
10
+ LMSDiscreteScheduler,
11
+ PNDMScheduler,
12
+ StableDiffusionPipeline,
13
+ UNet2DConditionModel,
14
+ )
15
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
16
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
17
+
18
+
19
+ pipe1_model_id = "CompVis/stable-diffusion-v1-1"
20
+ pipe2_model_id = "CompVis/stable-diffusion-v1-2"
21
+ pipe3_model_id = "CompVis/stable-diffusion-v1-3"
22
+ pipe4_model_id = "CompVis/stable-diffusion-v1-4"
23
+
24
+
25
+ class StableDiffusionComparisonPipeline(DiffusionPipeline):
26
+ r"""
27
+ Pipeline for parallel comparison of Stable Diffusion v1-v4
28
+ This pipeline inherits from DiffusionPipeline and depends on the use of an Auth Token for
29
+ downloading pre-trained checkpoints from Hugging Face Hub.
30
+ If using Hugging Face Hub, pass the Model ID for Stable Diffusion v1.4 as the previous 3 checkpoints will be loaded
31
+ automatically.
32
+ Args:
33
+ vae ([`AutoencoderKL`]):
34
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
35
+ text_encoder ([`CLIPTextModel`]):
36
+ Frozen text-encoder. Stable Diffusion uses the text portion of
37
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
38
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
39
+ tokenizer (`CLIPTokenizer`):
40
+ Tokenizer of class
41
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
42
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
43
+ scheduler ([`SchedulerMixin`]):
44
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
45
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
46
+ safety_checker ([`StableDiffusionMegaSafetyChecker`]):
47
+ Classification module that estimates whether generated images could be considered offensive or harmful.
48
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
49
+ feature_extractor ([`CLIPImageProcessor`]):
50
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
51
+ """
52
+
53
+ def __init__(
54
+ self,
55
+ vae: AutoencoderKL,
56
+ text_encoder: CLIPTextModel,
57
+ tokenizer: CLIPTokenizer,
58
+ unet: UNet2DConditionModel,
59
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
60
+ safety_checker: StableDiffusionSafetyChecker,
61
+ feature_extractor: CLIPImageProcessor,
62
+ requires_safety_checker: bool = True,
63
+ ):
64
+ super().__init__()
65
+
66
+ self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
67
+ self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
68
+ self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
69
+ self.pipe4 = StableDiffusionPipeline(
70
+ vae=vae,
71
+ text_encoder=text_encoder,
72
+ tokenizer=tokenizer,
73
+ unet=unet,
74
+ scheduler=scheduler,
75
+ safety_checker=safety_checker,
76
+ feature_extractor=feature_extractor,
77
+ requires_safety_checker=requires_safety_checker,
78
+ )
79
+
80
+ self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
81
+
82
+ @property
83
+ def layers(self) -> Dict[str, Any]:
84
+ return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
85
+
86
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
87
+ r"""
88
+ Enable sliced attention computation.
89
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
90
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
91
+ Args:
92
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
93
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
94
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
95
+ `attention_head_dim` must be a multiple of `slice_size`.
96
+ """
97
+ if slice_size == "auto":
98
+ # half the attention head size is usually a good trade-off between
99
+ # speed and memory
100
+ slice_size = self.unet.config.attention_head_dim // 2
101
+ self.unet.set_attention_slice(slice_size)
102
+
103
+ def disable_attention_slicing(self):
104
+ r"""
105
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
106
+ back to computing attention in one step.
107
+ """
108
+ # set slice_size = `None` to disable `attention slicing`
109
+ self.enable_attention_slicing(None)
110
+
111
+ @torch.no_grad()
112
+ def text2img_sd1_1(
113
+ self,
114
+ prompt: Union[str, List[str]],
115
+ height: int = 512,
116
+ width: int = 512,
117
+ num_inference_steps: int = 50,
118
+ guidance_scale: float = 7.5,
119
+ negative_prompt: Optional[Union[str, List[str]]] = None,
120
+ num_images_per_prompt: Optional[int] = 1,
121
+ eta: float = 0.0,
122
+ generator: Optional[torch.Generator] = None,
123
+ latents: Optional[torch.FloatTensor] = None,
124
+ output_type: Optional[str] = "pil",
125
+ return_dict: bool = True,
126
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
127
+ callback_steps: int = 1,
128
+ **kwargs,
129
+ ):
130
+ return self.pipe1(
131
+ prompt=prompt,
132
+ height=height,
133
+ width=width,
134
+ num_inference_steps=num_inference_steps,
135
+ guidance_scale=guidance_scale,
136
+ negative_prompt=negative_prompt,
137
+ num_images_per_prompt=num_images_per_prompt,
138
+ eta=eta,
139
+ generator=generator,
140
+ latents=latents,
141
+ output_type=output_type,
142
+ return_dict=return_dict,
143
+ callback=callback,
144
+ callback_steps=callback_steps,
145
+ **kwargs,
146
+ )
147
+
148
+ @torch.no_grad()
149
+ def text2img_sd1_2(
150
+ self,
151
+ prompt: Union[str, List[str]],
152
+ height: int = 512,
153
+ width: int = 512,
154
+ num_inference_steps: int = 50,
155
+ guidance_scale: float = 7.5,
156
+ negative_prompt: Optional[Union[str, List[str]]] = None,
157
+ num_images_per_prompt: Optional[int] = 1,
158
+ eta: float = 0.0,
159
+ generator: Optional[torch.Generator] = None,
160
+ latents: Optional[torch.FloatTensor] = None,
161
+ output_type: Optional[str] = "pil",
162
+ return_dict: bool = True,
163
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
164
+ callback_steps: int = 1,
165
+ **kwargs,
166
+ ):
167
+ return self.pipe2(
168
+ prompt=prompt,
169
+ height=height,
170
+ width=width,
171
+ num_inference_steps=num_inference_steps,
172
+ guidance_scale=guidance_scale,
173
+ negative_prompt=negative_prompt,
174
+ num_images_per_prompt=num_images_per_prompt,
175
+ eta=eta,
176
+ generator=generator,
177
+ latents=latents,
178
+ output_type=output_type,
179
+ return_dict=return_dict,
180
+ callback=callback,
181
+ callback_steps=callback_steps,
182
+ **kwargs,
183
+ )
184
+
185
+ @torch.no_grad()
186
+ def text2img_sd1_3(
187
+ self,
188
+ prompt: Union[str, List[str]],
189
+ height: int = 512,
190
+ width: int = 512,
191
+ num_inference_steps: int = 50,
192
+ guidance_scale: float = 7.5,
193
+ negative_prompt: Optional[Union[str, List[str]]] = None,
194
+ num_images_per_prompt: Optional[int] = 1,
195
+ eta: float = 0.0,
196
+ generator: Optional[torch.Generator] = None,
197
+ latents: Optional[torch.FloatTensor] = None,
198
+ output_type: Optional[str] = "pil",
199
+ return_dict: bool = True,
200
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
201
+ callback_steps: int = 1,
202
+ **kwargs,
203
+ ):
204
+ return self.pipe3(
205
+ prompt=prompt,
206
+ height=height,
207
+ width=width,
208
+ num_inference_steps=num_inference_steps,
209
+ guidance_scale=guidance_scale,
210
+ negative_prompt=negative_prompt,
211
+ num_images_per_prompt=num_images_per_prompt,
212
+ eta=eta,
213
+ generator=generator,
214
+ latents=latents,
215
+ output_type=output_type,
216
+ return_dict=return_dict,
217
+ callback=callback,
218
+ callback_steps=callback_steps,
219
+ **kwargs,
220
+ )
221
+
222
+ @torch.no_grad()
223
+ def text2img_sd1_4(
224
+ self,
225
+ prompt: Union[str, List[str]],
226
+ height: int = 512,
227
+ width: int = 512,
228
+ num_inference_steps: int = 50,
229
+ guidance_scale: float = 7.5,
230
+ negative_prompt: Optional[Union[str, List[str]]] = None,
231
+ num_images_per_prompt: Optional[int] = 1,
232
+ eta: float = 0.0,
233
+ generator: Optional[torch.Generator] = None,
234
+ latents: Optional[torch.FloatTensor] = None,
235
+ output_type: Optional[str] = "pil",
236
+ return_dict: bool = True,
237
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
238
+ callback_steps: int = 1,
239
+ **kwargs,
240
+ ):
241
+ return self.pipe4(
242
+ prompt=prompt,
243
+ height=height,
244
+ width=width,
245
+ num_inference_steps=num_inference_steps,
246
+ guidance_scale=guidance_scale,
247
+ negative_prompt=negative_prompt,
248
+ num_images_per_prompt=num_images_per_prompt,
249
+ eta=eta,
250
+ generator=generator,
251
+ latents=latents,
252
+ output_type=output_type,
253
+ return_dict=return_dict,
254
+ callback=callback,
255
+ callback_steps=callback_steps,
256
+ **kwargs,
257
+ )
258
+
259
+ @torch.no_grad()
260
+ def __call__(
261
+ self,
262
+ prompt: Union[str, List[str]],
263
+ height: int = 512,
264
+ width: int = 512,
265
+ num_inference_steps: int = 50,
266
+ guidance_scale: float = 7.5,
267
+ negative_prompt: Optional[Union[str, List[str]]] = None,
268
+ num_images_per_prompt: Optional[int] = 1,
269
+ eta: float = 0.0,
270
+ generator: Optional[torch.Generator] = None,
271
+ latents: Optional[torch.FloatTensor] = None,
272
+ output_type: Optional[str] = "pil",
273
+ return_dict: bool = True,
274
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
275
+ callback_steps: int = 1,
276
+ **kwargs,
277
+ ):
278
+ r"""
279
+ Function invoked when calling the pipeline for generation. It runs the four wrapped pipelines for the
280
+ Stable Diffusion v1.1-v1.4 checkpoints sequentially on the same inputs and returns all four results.
281
+ Args:
282
+ prompt (`str` or `List[str]`):
283
+ The prompt or prompts to guide the image generation.
284
+ height (`int`, optional, defaults to 512):
285
+ The height in pixels of the generated image.
286
+ width (`int`, optional, defaults to 512):
287
+ The width in pixels of the generated image.
288
+ num_inference_steps (`int`, optional, defaults to 50):
289
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
290
+ expense of slower inference.
291
+ guidance_scale (`float`, optional, defaults to 7.5):
292
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
293
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
294
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
295
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
296
+ usually at the expense of lower image quality.
297
+ eta (`float`, optional, defaults to 0.0):
298
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
299
+ [`schedulers.DDIMScheduler`], will be ignored for others.
300
+ generator (`torch.Generator`, optional):
301
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
302
+ deterministic.
303
+ latents (`torch.FloatTensor`, optional):
304
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
305
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
306
+ tensor will be generated by sampling using the supplied random `generator`.
307
+ output_type (`str`, optional, defaults to `"pil"`):
308
+ The output format of the generated image. Choose between
309
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
310
+ return_dict (`bool`, optional, defaults to `True`):
311
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
312
+ plain tuple.
313
+ Returns:
314
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
315
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
316
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
317
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
318
+ (nsfw) content, according to the `safety_checker`.
319
+ """
320
+
321
+ device = "cuda" if torch.cuda.is_available() else "cpu"
322
+ self.to(device)
323
+
324
+ # Checks if the height and width are divisible by 8 or not
325
+ if height % 8 != 0 or width % 8 != 0:
326
+ raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
327
+
328
+ # Get first result from Stable Diffusion Checkpoint v1.1
329
+ res1 = self.text2img_sd1_1(
330
+ prompt=prompt,
331
+ height=height,
332
+ width=width,
333
+ num_inference_steps=num_inference_steps,
334
+ guidance_scale=guidance_scale,
335
+ negative_prompt=negative_prompt,
336
+ num_images_per_prompt=num_images_per_prompt,
337
+ eta=eta,
338
+ generator=generator,
339
+ latents=latents,
340
+ output_type=output_type,
341
+ return_dict=return_dict,
342
+ callback=callback,
343
+ callback_steps=callback_steps,
344
+ **kwargs,
345
+ )
346
+
347
+ # Get first result from Stable Diffusion Checkpoint v1.2
348
+ res2 = self.text2img_sd1_2(
349
+ prompt=prompt,
350
+ height=height,
351
+ width=width,
352
+ num_inference_steps=num_inference_steps,
353
+ guidance_scale=guidance_scale,
354
+ negative_prompt=negative_prompt,
355
+ num_images_per_prompt=num_images_per_prompt,
356
+ eta=eta,
357
+ generator=generator,
358
+ latents=latents,
359
+ output_type=output_type,
360
+ return_dict=return_dict,
361
+ callback=callback,
362
+ callback_steps=callback_steps,
363
+ **kwargs,
364
+ )
365
+
366
+ # Get first result from Stable Diffusion Checkpoint v1.3
367
+ res3 = self.text2img_sd1_3(
368
+ prompt=prompt,
369
+ height=height,
370
+ width=width,
371
+ num_inference_steps=num_inference_steps,
372
+ guidance_scale=guidance_scale,
373
+ negative_prompt=negative_prompt,
374
+ num_images_per_prompt=num_images_per_prompt,
375
+ eta=eta,
376
+ generator=generator,
377
+ latents=latents,
378
+ output_type=output_type,
379
+ return_dict=return_dict,
380
+ callback=callback,
381
+ callback_steps=callback_steps,
382
+ **kwargs,
383
+ )
384
+
385
+ # Get first result from Stable Diffusion Checkpoint v1.4
386
+ res4 = self.text2img_sd1_4(
387
+ prompt=prompt,
388
+ height=height,
389
+ width=width,
390
+ num_inference_steps=num_inference_steps,
391
+ guidance_scale=guidance_scale,
392
+ negative_prompt=negative_prompt,
393
+ num_images_per_prompt=num_images_per_prompt,
394
+ eta=eta,
395
+ generator=generator,
396
+ latents=latents,
397
+ output_type=output_type,
398
+ return_dict=return_dict,
399
+ callback=callback,
400
+ callback_steps=callback_steps,
401
+ **kwargs,
402
+ )
403
+
404
+ # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
405
+ return StableDiffusionPipelineOutput(images=[res1[0], res2[0], res3[0], res4[0]], nsfw_content_detected=None)
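A minimal, hedged usage sketch for the comparison pipeline above. The `custom_pipeline` registration name, the base checkpoint id, and the CUDA device are assumptions for illustration; expect additional downloads for the wrapped sub-pipelines (`pipe1`-`pipe4`).

```py
import torch

from diffusers import DiffusionPipeline

# Assumed loading call: the community pipeline is referenced by its file stem.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
    torch_dtype=torch.float16,
).to("cuda")

output = pipe(prompt="an astronaut riding a horse on mars", num_inference_steps=25)

# One entry per checkpoint (v1.1, v1.2, v1.3, v1.4), in that order.
for idx, result in enumerate(output.images, start=1):
    image = result[0] if isinstance(result, list) else result
    image.save(f"stable_diffusion_1_{idx}.png")
```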
v0.24.0/stable_diffusion_controlnet_img2img.py ADDED
@@ -0,0 +1,990 @@
1
+ # Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
2
+
3
+ import inspect
4
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
5
+
6
+ import numpy as np
7
+ import PIL.Image
8
+ import torch
9
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
10
+
11
+ from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging
12
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
14
+ from diffusers.schedulers import KarrasDiffusionSchedulers
15
+ from diffusers.utils import (
16
+ PIL_INTERPOLATION,
17
+ is_accelerate_available,
18
+ is_accelerate_version,
19
+ replace_example_docstring,
20
+ )
21
+ from diffusers.utils.torch_utils import randn_tensor
22
+
23
+
24
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
25
+
26
+ EXAMPLE_DOC_STRING = """
27
+ Examples:
28
+ ```py
29
+ >>> import numpy as np
30
+ >>> import torch
31
+ >>> from PIL import Image
32
+ >>> from diffusers import ControlNetModel, UniPCMultistepScheduler
+ >>> from stable_diffusion_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
33
+ >>> from diffusers.utils import load_image
34
+
35
+ >>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
36
+
37
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
38
+
39
+ >>> pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
40
+ "runwayml/stable-diffusion-v1-5",
41
+ controlnet=controlnet,
42
+ safety_checker=None,
43
+ torch_dtype=torch.float16
44
+ )
45
+
46
+ >>> pipe_controlnet.scheduler = UniPCMultistepScheduler.from_config(pipe_controlnet.scheduler.config)
47
+ >>> pipe_controlnet.enable_xformers_memory_efficient_attention()
48
+ >>> pipe_controlnet.enable_model_cpu_offload()
49
+
50
+ # using image with edges for our canny controlnet
51
+ >>> control_image = load_image(
52
+ "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/vermeer_canny_edged.png")
53
+
54
+
55
+ >>> result_img = pipe_controlnet(controlnet_conditioning_image=control_image,
56
+ image=input_image,
57
+ prompt="an android robot, cyberpunk, digital art masterpiece",
58
+ num_inference_steps=20).images[0]
59
+
60
+ >>> result_img.show()
61
+ ```
62
+ """
63
+
64
+
65
+ def prepare_image(image):
66
+ if isinstance(image, torch.Tensor):
67
+ # Batch single image
68
+ if image.ndim == 3:
69
+ image = image.unsqueeze(0)
70
+
71
+ image = image.to(dtype=torch.float32)
72
+ else:
73
+ # preprocess image
74
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
75
+ image = [image]
76
+
77
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
78
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
79
+ image = np.concatenate(image, axis=0)
80
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
81
+ image = np.concatenate([i[None, :] for i in image], axis=0)
82
+
83
+ image = image.transpose(0, 3, 1, 2)
84
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
85
+
86
+ return image
87
+
88
+
89
+ def prepare_controlnet_conditioning_image(
90
+ controlnet_conditioning_image,
91
+ width,
92
+ height,
93
+ batch_size,
94
+ num_images_per_prompt,
95
+ device,
96
+ dtype,
97
+ do_classifier_free_guidance,
98
+ ):
99
+ if not isinstance(controlnet_conditioning_image, torch.Tensor):
100
+ if isinstance(controlnet_conditioning_image, PIL.Image.Image):
101
+ controlnet_conditioning_image = [controlnet_conditioning_image]
102
+
103
+ if isinstance(controlnet_conditioning_image[0], PIL.Image.Image):
104
+ controlnet_conditioning_image = [
105
+ np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :]
106
+ for i in controlnet_conditioning_image
107
+ ]
108
+ controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0)
109
+ controlnet_conditioning_image = np.array(controlnet_conditioning_image).astype(np.float32) / 255.0
110
+ controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2)
111
+ controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image)
112
+ elif isinstance(controlnet_conditioning_image[0], torch.Tensor):
113
+ controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0)
114
+
115
+ image_batch_size = controlnet_conditioning_image.shape[0]
116
+
117
+ if image_batch_size == 1:
118
+ repeat_by = batch_size
119
+ else:
120
+ # image batch size is the same as prompt batch size
121
+ repeat_by = num_images_per_prompt
122
+
123
+ controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0)
124
+
125
+ controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype)
126
+
127
+ if do_classifier_free_guidance:
128
+ controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)
129
+
130
+ return controlnet_conditioning_image
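Note that the two preprocessing helpers above normalize differently: `prepare_image` maps pixel values into the [-1, 1] range expected by the VAE encoder, while `prepare_controlnet_conditioning_image` keeps the conditioning image in [0, 1] for the ControlNet. A small illustrative check, assuming both helpers are imported from this module:

```py
import numpy as np
import PIL.Image
import torch

# Half-black, half-white dummy image to make the value ranges visible.
arr = np.zeros((64, 64, 3), dtype=np.uint8)
arr[:, 32:] = 255
dummy = PIL.Image.fromarray(arr)

init = prepare_image(dummy)
cond = prepare_controlnet_conditioning_image(
    controlnet_conditioning_image=dummy,
    width=64,
    height=64,
    batch_size=1,
    num_images_per_prompt=1,
    device=torch.device("cpu"),
    dtype=torch.float32,
    do_classifier_free_guidance=False,
)

print(init.min().item(), init.max().item())  # -1.0 1.0  (VAE input range)
print(cond.min().item(), cond.max().item())  #  0.0 1.0  (ControlNet input range)
```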
131
+
132
+
133
+ class StableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
134
+ """
135
+ Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
136
+ """
137
+
138
+ _optional_components = ["safety_checker", "feature_extractor"]
139
+
140
+ def __init__(
141
+ self,
142
+ vae: AutoencoderKL,
143
+ text_encoder: CLIPTextModel,
144
+ tokenizer: CLIPTokenizer,
145
+ unet: UNet2DConditionModel,
146
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
147
+ scheduler: KarrasDiffusionSchedulers,
148
+ safety_checker: StableDiffusionSafetyChecker,
149
+ feature_extractor: CLIPImageProcessor,
150
+ requires_safety_checker: bool = True,
151
+ ):
152
+ super().__init__()
153
+
154
+ if safety_checker is None and requires_safety_checker:
155
+ logger.warning(
156
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
157
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
158
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
159
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
160
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
161
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
162
+ )
163
+
164
+ if safety_checker is not None and feature_extractor is None:
165
+ raise ValueError(
166
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
167
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
168
+ )
169
+
170
+ if isinstance(controlnet, (list, tuple)):
171
+ controlnet = MultiControlNetModel(controlnet)
172
+
173
+ self.register_modules(
174
+ vae=vae,
175
+ text_encoder=text_encoder,
176
+ tokenizer=tokenizer,
177
+ unet=unet,
178
+ controlnet=controlnet,
179
+ scheduler=scheduler,
180
+ safety_checker=safety_checker,
181
+ feature_extractor=feature_extractor,
182
+ )
183
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
184
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
185
+
186
+ def enable_vae_slicing(self):
187
+ r"""
188
+ Enable sliced VAE decoding.
189
+
190
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
191
+ steps. This is useful to save some memory and allow larger batch sizes.
192
+ """
193
+ self.vae.enable_slicing()
194
+
195
+ def disable_vae_slicing(self):
196
+ r"""
197
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
198
+ computing decoding in one step.
199
+ """
200
+ self.vae.disable_slicing()
201
+
202
+ def enable_sequential_cpu_offload(self, gpu_id=0):
203
+ r"""
204
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
205
+ text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a
206
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
207
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
208
+ `enable_model_cpu_offload`, but performance is lower.
209
+ """
210
+ if is_accelerate_available():
211
+ from accelerate import cpu_offload
212
+ else:
213
+ raise ImportError("Please install accelerate via `pip install accelerate`")
214
+
215
+ device = torch.device(f"cuda:{gpu_id}")
216
+
217
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:
218
+ cpu_offload(cpu_offloaded_model, device)
219
+
220
+ if self.safety_checker is not None:
221
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
222
+
223
+ def enable_model_cpu_offload(self, gpu_id=0):
224
+ r"""
225
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
226
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
227
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
228
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
229
+ """
230
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
231
+ from accelerate import cpu_offload_with_hook
232
+ else:
233
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
234
+
235
+ device = torch.device(f"cuda:{gpu_id}")
236
+
237
+ hook = None
238
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
239
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
240
+
241
+ if self.safety_checker is not None:
242
+ # the safety checker can offload the vae again
243
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
244
+
245
+ # the controlnet hook has to be manually offloaded as it alternates with the unet
246
+ cpu_offload_with_hook(self.controlnet, device)
247
+
248
+ # We'll offload the last model manually.
249
+ self.final_offload_hook = hook
250
+
251
+ @property
252
+ def _execution_device(self):
253
+ r"""
254
+ Returns the device on which the pipeline's models will be executed. After calling
255
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
256
+ hooks.
257
+ """
258
+ if not hasattr(self.unet, "_hf_hook"):
259
+ return self.device
260
+ for module in self.unet.modules():
261
+ if (
262
+ hasattr(module, "_hf_hook")
263
+ and hasattr(module._hf_hook, "execution_device")
264
+ and module._hf_hook.execution_device is not None
265
+ ):
266
+ return torch.device(module._hf_hook.execution_device)
267
+ return self.device
268
+
269
+ def _encode_prompt(
270
+ self,
271
+ prompt,
272
+ device,
273
+ num_images_per_prompt,
274
+ do_classifier_free_guidance,
275
+ negative_prompt=None,
276
+ prompt_embeds: Optional[torch.FloatTensor] = None,
277
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
278
+ ):
279
+ r"""
280
+ Encodes the prompt into text encoder hidden states.
281
+
282
+ Args:
283
+ prompt (`str` or `List[str]`, *optional*):
284
+ prompt to be encoded
285
+ device: (`torch.device`):
286
+ torch device
287
+ num_images_per_prompt (`int`):
288
+ number of images that should be generated per prompt
289
+ do_classifier_free_guidance (`bool`):
290
+ whether to use classifier free guidance or not
291
+ negative_prompt (`str` or `List[str]`, *optional*):
292
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
293
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
294
+ prompt_embeds (`torch.FloatTensor`, *optional*):
295
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
296
+ provided, text embeddings will be generated from `prompt` input argument.
297
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
298
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
299
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
300
+ argument.
301
+ """
302
+ if prompt is not None and isinstance(prompt, str):
303
+ batch_size = 1
304
+ elif prompt is not None and isinstance(prompt, list):
305
+ batch_size = len(prompt)
306
+ else:
307
+ batch_size = prompt_embeds.shape[0]
308
+
309
+ if prompt_embeds is None:
310
+ text_inputs = self.tokenizer(
311
+ prompt,
312
+ padding="max_length",
313
+ max_length=self.tokenizer.model_max_length,
314
+ truncation=True,
315
+ return_tensors="pt",
316
+ )
317
+ text_input_ids = text_inputs.input_ids
318
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
319
+
320
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
321
+ text_input_ids, untruncated_ids
322
+ ):
323
+ removed_text = self.tokenizer.batch_decode(
324
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
325
+ )
326
+ logger.warning(
327
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
328
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
329
+ )
330
+
331
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
332
+ attention_mask = text_inputs.attention_mask.to(device)
333
+ else:
334
+ attention_mask = None
335
+
336
+ prompt_embeds = self.text_encoder(
337
+ text_input_ids.to(device),
338
+ attention_mask=attention_mask,
339
+ )
340
+ prompt_embeds = prompt_embeds[0]
341
+
342
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
343
+
344
+ bs_embed, seq_len, _ = prompt_embeds.shape
345
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
346
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
347
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
348
+
349
+ # get unconditional embeddings for classifier free guidance
350
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
351
+ uncond_tokens: List[str]
352
+ if negative_prompt is None:
353
+ uncond_tokens = [""] * batch_size
354
+ elif type(prompt) is not type(negative_prompt):
355
+ raise TypeError(
356
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
357
+ f" {type(prompt)}."
358
+ )
359
+ elif isinstance(negative_prompt, str):
360
+ uncond_tokens = [negative_prompt]
361
+ elif batch_size != len(negative_prompt):
362
+ raise ValueError(
363
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
364
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
365
+ " the batch size of `prompt`."
366
+ )
367
+ else:
368
+ uncond_tokens = negative_prompt
369
+
370
+ max_length = prompt_embeds.shape[1]
371
+ uncond_input = self.tokenizer(
372
+ uncond_tokens,
373
+ padding="max_length",
374
+ max_length=max_length,
375
+ truncation=True,
376
+ return_tensors="pt",
377
+ )
378
+
379
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
380
+ attention_mask = uncond_input.attention_mask.to(device)
381
+ else:
382
+ attention_mask = None
383
+
384
+ negative_prompt_embeds = self.text_encoder(
385
+ uncond_input.input_ids.to(device),
386
+ attention_mask=attention_mask,
387
+ )
388
+ negative_prompt_embeds = negative_prompt_embeds[0]
389
+
390
+ if do_classifier_free_guidance:
391
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
392
+ seq_len = negative_prompt_embeds.shape[1]
393
+
394
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
395
+
396
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
397
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
398
+
399
+ # For classifier free guidance, we need to do two forward passes.
400
+ # Here we concatenate the unconditional and text embeddings into a single batch
401
+ # to avoid doing two forward passes
402
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
403
+
404
+ return prompt_embeds
405
+
406
+ def run_safety_checker(self, image, device, dtype):
407
+ if self.safety_checker is not None:
408
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
409
+ image, has_nsfw_concept = self.safety_checker(
410
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
411
+ )
412
+ else:
413
+ has_nsfw_concept = None
414
+ return image, has_nsfw_concept
415
+
416
+ def decode_latents(self, latents):
417
+ latents = 1 / self.vae.config.scaling_factor * latents
418
+ image = self.vae.decode(latents).sample
419
+ image = (image / 2 + 0.5).clamp(0, 1)
420
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
421
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
422
+ return image
423
+
424
+ def prepare_extra_step_kwargs(self, generator, eta):
425
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
426
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
427
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
428
+ # and should be between [0, 1]
429
+
430
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
431
+ extra_step_kwargs = {}
432
+ if accepts_eta:
433
+ extra_step_kwargs["eta"] = eta
434
+
435
+ # check if the scheduler accepts generator
436
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
437
+ if accepts_generator:
438
+ extra_step_kwargs["generator"] = generator
439
+ return extra_step_kwargs
440
+
441
+ def check_controlnet_conditioning_image(self, image, prompt, prompt_embeds):
442
+ image_is_pil = isinstance(image, PIL.Image.Image)
443
+ image_is_tensor = isinstance(image, torch.Tensor)
444
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
445
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
446
+
447
+ if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:
448
+ raise TypeError(
449
+ "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
450
+ )
451
+
452
+ if image_is_pil:
453
+ image_batch_size = 1
454
+ elif image_is_tensor:
455
+ image_batch_size = image.shape[0]
456
+ elif image_is_pil_list:
457
+ image_batch_size = len(image)
458
+ elif image_is_tensor_list:
459
+ image_batch_size = len(image)
460
+ else:
461
+ raise ValueError("controlnet condition image is not valid")
462
+
463
+ if prompt is not None and isinstance(prompt, str):
464
+ prompt_batch_size = 1
465
+ elif prompt is not None and isinstance(prompt, list):
466
+ prompt_batch_size = len(prompt)
467
+ elif prompt_embeds is not None:
468
+ prompt_batch_size = prompt_embeds.shape[0]
469
+ else:
470
+ raise ValueError("prompt or prompt_embeds are not valid")
471
+
472
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
473
+ raise ValueError(
474
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
475
+ )
476
+
477
+ def check_inputs(
478
+ self,
479
+ prompt,
480
+ image,
481
+ controlnet_conditioning_image,
482
+ height,
483
+ width,
484
+ callback_steps,
485
+ negative_prompt=None,
486
+ prompt_embeds=None,
487
+ negative_prompt_embeds=None,
488
+ strength=None,
489
+ controlnet_guidance_start=None,
490
+ controlnet_guidance_end=None,
491
+ controlnet_conditioning_scale=None,
492
+ ):
493
+ if height % 8 != 0 or width % 8 != 0:
494
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
495
+
496
+ if (callback_steps is None) or (
497
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
498
+ ):
499
+ raise ValueError(
500
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
501
+ f" {type(callback_steps)}."
502
+ )
503
+
504
+ if prompt is not None and prompt_embeds is not None:
505
+ raise ValueError(
506
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
507
+ " only forward one of the two."
508
+ )
509
+ elif prompt is None and prompt_embeds is None:
510
+ raise ValueError(
511
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
512
+ )
513
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
514
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
515
+
516
+ if negative_prompt is not None and negative_prompt_embeds is not None:
517
+ raise ValueError(
518
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
519
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
520
+ )
521
+
522
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
523
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
524
+ raise ValueError(
525
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
526
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
527
+ f" {negative_prompt_embeds.shape}."
528
+ )
529
+
530
+ # check controlnet condition image
531
+
532
+ if isinstance(self.controlnet, ControlNetModel):
533
+ self.check_controlnet_conditioning_image(controlnet_conditioning_image, prompt, prompt_embeds)
534
+ elif isinstance(self.controlnet, MultiControlNetModel):
535
+ if not isinstance(controlnet_conditioning_image, list):
536
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
537
+
538
+ if len(controlnet_conditioning_image) != len(self.controlnet.nets):
539
+ raise ValueError(
540
+ "For multiple controlnets: `image` must have the same length as the number of controlnets."
541
+ )
542
+
543
+ for image_ in controlnet_conditioning_image:
544
+ self.check_controlnet_conditioning_image(image_, prompt, prompt_embeds)
545
+ else:
546
+ assert False
547
+
548
+ # Check `controlnet_conditioning_scale`
549
+
550
+ if isinstance(self.controlnet, ControlNetModel):
551
+ if not isinstance(controlnet_conditioning_scale, float):
552
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
553
+ elif isinstance(self.controlnet, MultiControlNetModel):
554
+ if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
555
+ self.controlnet.nets
556
+ ):
557
+ raise ValueError(
558
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
559
+ " the same length as the number of controlnets"
560
+ )
561
+ else:
562
+ assert False
563
+
564
+ if isinstance(image, torch.Tensor):
565
+ if image.ndim != 3 and image.ndim != 4:
566
+ raise ValueError("`image` must have 3 or 4 dimensions")
567
+
568
+ if image.ndim == 3:
569
+ image_batch_size = 1
570
+ image_channels, image_height, image_width = image.shape
571
+ elif image.ndim == 4:
572
+ image_batch_size, image_channels, image_height, image_width = image.shape
573
+ else:
574
+ assert False
575
+
576
+ if image_channels != 3:
577
+ raise ValueError("`image` must have 3 channels")
578
+
579
+ if image.min() < -1 or image.max() > 1:
580
+ raise ValueError("`image` should be in range [-1, 1]")
581
+
582
+ if self.vae.config.latent_channels != self.unet.config.in_channels:
583
+ raise ValueError(
584
+ f"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received"
585
+ f" latent channels: {self.vae.config.latent_channels},"
586
+ f" Please verify the config of `pipeline.unet` and the `pipeline.vae`"
587
+ )
588
+
589
+ if strength < 0 or strength > 1:
590
+ raise ValueError(f"The value of `strength` should be in [0.0, 1.0] but is {strength}")
591
+
592
+ if controlnet_guidance_start < 0 or controlnet_guidance_start > 1:
593
+ raise ValueError(
594
+ f"The value of `controlnet_guidance_start` should be in [0.0, 1.0] but is {controlnet_guidance_start}"
595
+ )
596
+
597
+ if controlnet_guidance_end < 0 or controlnet_guidance_end > 1:
598
+ raise ValueError(
599
+ f"The value of `controlnet_guidance_end` should be in [0.0, 1.0] but is {controlnet_guidance_end}"
600
+ )
601
+
602
+ if controlnet_guidance_start > controlnet_guidance_end:
603
+ raise ValueError(
604
+ "The value of `controlnet_guidance_start` should not be greater than `controlnet_guidance_end`, but got"
605
+ f" `controlnet_guidance_start` {controlnet_guidance_start} > `controlnet_guidance_end` {controlnet_guidance_end}"
606
+ )
607
+
608
+ def get_timesteps(self, num_inference_steps, strength, device):
609
+ # get the original timestep using init_timestep
610
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
611
+
612
+ t_start = max(num_inference_steps - init_timestep, 0)
613
+ timesteps = self.scheduler.timesteps[t_start:]
614
+
615
+ return timesteps, num_inference_steps - t_start
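For reference, a plain-number illustration of how `strength` trims the scheduler's timesteps in `get_timesteps` above (the values are chosen arbitrarily):

```py
num_inference_steps = 50
strength = 0.8

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10

# The first 10 of the 50 scheduler timesteps are skipped, leaving 40 denoising steps:
# a higher strength adds more noise and re-runs more of the trajectory.
print(t_start, num_inference_steps - t_start)  # 10 40
```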
616
+
617
+ def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
618
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
619
+ raise ValueError(
620
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
621
+ )
622
+
623
+ image = image.to(device=device, dtype=dtype)
624
+
625
+ batch_size = batch_size * num_images_per_prompt
626
+ if isinstance(generator, list) and len(generator) != batch_size:
627
+ raise ValueError(
628
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
629
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
630
+ )
631
+
632
+ if isinstance(generator, list):
633
+ init_latents = [
634
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
635
+ ]
636
+ init_latents = torch.cat(init_latents, dim=0)
637
+ else:
638
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
639
+
640
+ init_latents = self.vae.config.scaling_factor * init_latents
641
+
642
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
643
+ raise ValueError(
644
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
645
+ )
646
+ else:
647
+ init_latents = torch.cat([init_latents], dim=0)
648
+
649
+ shape = init_latents.shape
650
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
651
+
652
+ # get latents
653
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
654
+ latents = init_latents
655
+
656
+ return latents
657
+
658
+ def _default_height_width(self, height, width, image):
659
+ if isinstance(image, list):
660
+ image = image[0]
661
+
662
+ if height is None:
663
+ if isinstance(image, PIL.Image.Image):
664
+ height = image.height
665
+ elif isinstance(image, torch.Tensor):
666
+ height = image.shape[3]
667
+
668
+ height = (height // 8) * 8 # round down to nearest multiple of 8
669
+
670
+ if width is None:
671
+ if isinstance(image, PIL.Image.Image):
672
+ width = image.width
673
+ elif isinstance(image, torch.Tensor):
674
+ width = image.shape[2]
675
+
676
+ width = (width // 8) * 8 # round down to nearest multiple of 8
677
+
678
+ return height, width
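The defaults above snap the reference image size down to a multiple of 8, matching the VAE's spatial downsampling; for example:

```py
# Rounding behaviour used by `_default_height_width` above.
for size in (512, 513, 519, 520):
    print(size, "->", (size // 8) * 8)
# 512 -> 512, 513 -> 512, 519 -> 512, 520 -> 520
```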
679
+
680
+ @torch.no_grad()
681
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
682
+ def __call__(
683
+ self,
684
+ prompt: Union[str, List[str]] = None,
685
+ image: Union[torch.Tensor, PIL.Image.Image] = None,
686
+ controlnet_conditioning_image: Union[
687
+ torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]
688
+ ] = None,
689
+ strength: float = 0.8,
690
+ height: Optional[int] = None,
691
+ width: Optional[int] = None,
692
+ num_inference_steps: int = 50,
693
+ guidance_scale: float = 7.5,
694
+ negative_prompt: Optional[Union[str, List[str]]] = None,
695
+ num_images_per_prompt: Optional[int] = 1,
696
+ eta: float = 0.0,
697
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
698
+ latents: Optional[torch.FloatTensor] = None,
699
+ prompt_embeds: Optional[torch.FloatTensor] = None,
700
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
701
+ output_type: Optional[str] = "pil",
702
+ return_dict: bool = True,
703
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
704
+ callback_steps: int = 1,
705
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
706
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
707
+ controlnet_guidance_start: float = 0.0,
708
+ controlnet_guidance_end: float = 1.0,
709
+ ):
710
+ r"""
711
+ Function invoked when calling the pipeline for generation.
712
+
713
+ Args:
714
+ prompt (`str` or `List[str]`, *optional*):
715
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
716
+ instead.
717
+ image (`torch.Tensor` or `PIL.Image.Image`):
718
+ `Image`, or tensor representing an image batch to be used as the starting point for the image-to-image
719
+ generation; it is noised according to `strength` and then denoised following `prompt`.
720
+ controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
721
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance for the UNet. If
722
+ the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
723
+ also be accepted as an image. The control image is automatically resized to fit the output image.
724
+ strength (`float`, *optional*):
725
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
726
+ will be used as a starting point, adding more noise to it the larger the `strength`. The number of
727
+ denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
728
+ be maximum and the denoising process will run for the full number of iterations specified in
729
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
730
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
731
+ The height in pixels of the generated image.
732
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
733
+ The width in pixels of the generated image.
734
+ num_inference_steps (`int`, *optional*, defaults to 50):
735
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
736
+ expense of slower inference.
737
+ guidance_scale (`float`, *optional*, defaults to 7.5):
738
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
739
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
740
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
741
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
742
+ usually at the expense of lower image quality.
743
+ negative_prompt (`str` or `List[str]`, *optional*):
744
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
745
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
746
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
747
+ The number of images to generate per prompt.
748
+ eta (`float`, *optional*, defaults to 0.0):
749
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
750
+ [`schedulers.DDIMScheduler`], will be ignored for others.
751
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
752
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
753
+ to make generation deterministic.
754
+ latents (`torch.FloatTensor`, *optional*):
755
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
756
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
757
+ tensor will be generated by sampling using the supplied random `generator`.
758
+ prompt_embeds (`torch.FloatTensor`, *optional*):
759
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
760
+ provided, text embeddings will be generated from `prompt` input argument.
761
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
762
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
763
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
764
+ argument.
765
+ output_type (`str`, *optional*, defaults to `"pil"`):
766
+ The output format of the generated image. Choose between
767
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
768
+ return_dict (`bool`, *optional*, defaults to `True`):
769
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
770
+ plain tuple.
771
+ callback (`Callable`, *optional*):
772
+ A function that will be called every `callback_steps` steps during inference. The function will be
773
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
774
+ callback_steps (`int`, *optional*, defaults to 1):
775
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
776
+ called at every step.
777
+ cross_attention_kwargs (`dict`, *optional*):
778
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
779
+ `self.processor` in
780
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
781
+ controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
782
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
783
+ to the residual in the original unet.
784
+ controlnet_guidance_start (`float`, *optional*, defaults to 0.0):
785
+ The percentage of total steps the controlnet starts applying. Must be between 0 and 1.
786
+ controlnet_guidance_end (`float`, *optional*, defaults to 1.0):
787
+ The percentage of total steps the controlnet ends applying. Must be between 0 and 1. Must be greater
788
+ than `controlnet_guidance_start`.
789
+
790
+ Examples:
791
+
792
+ Returns:
793
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
794
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
795
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
796
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
797
+ (nsfw) content, according to the `safety_checker`.
798
+ """
799
+ # 0. Default height and width to unet
800
+ height, width = self._default_height_width(height, width, controlnet_conditioning_image)
801
+
802
+ # 1. Check inputs. Raise error if not correct
803
+ self.check_inputs(
804
+ prompt,
805
+ image,
806
+ controlnet_conditioning_image,
807
+ height,
808
+ width,
809
+ callback_steps,
810
+ negative_prompt,
811
+ prompt_embeds,
812
+ negative_prompt_embeds,
813
+ strength,
814
+ controlnet_guidance_start,
815
+ controlnet_guidance_end,
816
+ controlnet_conditioning_scale,
817
+ )
818
+
819
+ # 2. Define call parameters
820
+ if prompt is not None and isinstance(prompt, str):
821
+ batch_size = 1
822
+ elif prompt is not None and isinstance(prompt, list):
823
+ batch_size = len(prompt)
824
+ else:
825
+ batch_size = prompt_embeds.shape[0]
826
+
827
+ device = self._execution_device
828
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
829
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
830
+ # corresponds to doing no classifier free guidance.
831
+ do_classifier_free_guidance = guidance_scale > 1.0
832
+
833
+ if isinstance(self.controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
834
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets)
835
+
836
+ # 3. Encode input prompt
837
+ prompt_embeds = self._encode_prompt(
838
+ prompt,
839
+ device,
840
+ num_images_per_prompt,
841
+ do_classifier_free_guidance,
842
+ negative_prompt,
843
+ prompt_embeds=prompt_embeds,
844
+ negative_prompt_embeds=negative_prompt_embeds,
845
+ )
846
+
847
+ # 4. Prepare image and controlnet_conditioning_image
848
+ image = prepare_image(image)
849
+
850
+ # condition image(s)
851
+ if isinstance(self.controlnet, ControlNetModel):
852
+ controlnet_conditioning_image = prepare_controlnet_conditioning_image(
853
+ controlnet_conditioning_image=controlnet_conditioning_image,
854
+ width=width,
855
+ height=height,
856
+ batch_size=batch_size * num_images_per_prompt,
857
+ num_images_per_prompt=num_images_per_prompt,
858
+ device=device,
859
+ dtype=self.controlnet.dtype,
860
+ do_classifier_free_guidance=do_classifier_free_guidance,
861
+ )
862
+ elif isinstance(self.controlnet, MultiControlNetModel):
863
+ controlnet_conditioning_images = []
864
+
865
+ for image_ in controlnet_conditioning_image:
866
+ image_ = prepare_controlnet_conditioning_image(
867
+ controlnet_conditioning_image=image_,
868
+ width=width,
869
+ height=height,
870
+ batch_size=batch_size * num_images_per_prompt,
871
+ num_images_per_prompt=num_images_per_prompt,
872
+ device=device,
873
+ dtype=self.controlnet.dtype,
874
+ do_classifier_free_guidance=do_classifier_free_guidance,
875
+ )
876
+
877
+ controlnet_conditioning_images.append(image_)
878
+
879
+ controlnet_conditioning_image = controlnet_conditioning_images
880
+ else:
881
+ assert False
882
+
883
+ # 5. Prepare timesteps
884
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
885
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
886
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
887
+
888
+ # 6. Prepare latent variables
889
+ latents = self.prepare_latents(
890
+ image,
891
+ latent_timestep,
892
+ batch_size,
893
+ num_images_per_prompt,
894
+ prompt_embeds.dtype,
895
+ device,
896
+ generator,
897
+ )
898
+
899
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
900
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
901
+
902
+ # 8. Denoising loop
903
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
904
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
905
+ for i, t in enumerate(timesteps):
906
+ # expand the latents if we are doing classifier free guidance
907
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
908
+
909
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
910
+
911
+ # compute the percentage of total steps we are at
912
+ current_sampling_percent = i / len(timesteps)
913
+
914
+ if (
915
+ current_sampling_percent < controlnet_guidance_start
916
+ or current_sampling_percent > controlnet_guidance_end
917
+ ):
918
+ # do not apply the controlnet
919
+ down_block_res_samples = None
920
+ mid_block_res_sample = None
921
+ else:
922
+ # apply the controlnet
923
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
924
+ latent_model_input,
925
+ t,
926
+ encoder_hidden_states=prompt_embeds,
927
+ controlnet_cond=controlnet_conditioning_image,
928
+ conditioning_scale=controlnet_conditioning_scale,
929
+ return_dict=False,
930
+ )
931
+
932
+ # predict the noise residual
933
+ noise_pred = self.unet(
934
+ latent_model_input,
935
+ t,
936
+ encoder_hidden_states=prompt_embeds,
937
+ cross_attention_kwargs=cross_attention_kwargs,
938
+ down_block_additional_residuals=down_block_res_samples,
939
+ mid_block_additional_residual=mid_block_res_sample,
940
+ ).sample
941
+
942
+ # perform guidance
943
+ if do_classifier_free_guidance:
944
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
945
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
946
+
947
+ # compute the previous noisy sample x_t -> x_t-1
948
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
949
+
950
+ # call the callback, if provided
951
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
952
+ progress_bar.update()
953
+ if callback is not None and i % callback_steps == 0:
954
+ step_idx = i // getattr(self.scheduler, "order", 1)
955
+ callback(step_idx, t, latents)
956
+
957
+ # If we do sequential model offloading, let's offload unet and controlnet
958
+ # manually for max memory savings
959
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
960
+ self.unet.to("cpu")
961
+ self.controlnet.to("cpu")
962
+ torch.cuda.empty_cache()
963
+
964
+ if output_type == "latent":
965
+ image = latents
966
+ has_nsfw_concept = None
967
+ elif output_type == "pil":
968
+ # 8. Post-processing
969
+ image = self.decode_latents(latents)
970
+
971
+ # 9. Run safety checker
972
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
973
+
974
+ # 10. Convert to PIL
975
+ image = self.numpy_to_pil(image)
976
+ else:
977
+ # 8. Post-processing
978
+ image = self.decode_latents(latents)
979
+
980
+ # 9. Run safety checker
981
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
982
+
983
+ # Offload last model to CPU
984
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
985
+ self.final_offload_hook.offload()
986
+
987
+ if not return_dict:
988
+ return (image, has_nsfw_concept)
989
+
990
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
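A hedged end-to-end sketch of the windowed ControlNet guidance exposed by `controlnet_guidance_start`/`controlnet_guidance_end` above. The `custom_pipeline` name, the window values, and the CUDA device are assumptions; the model ids and image URLs follow the example docstring in this file.

```py
import torch

from diffusers import ControlNetModel, DiffusionPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    custom_pipeline="stable_diffusion_controlnet_img2img",  # assumed registration name of this file
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")

init_image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
)
canny_image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/vermeer_canny_edged.png"
)

result = pipe(
    prompt="an android robot, cyberpunk, digital art masterpiece",
    image=init_image,
    controlnet_conditioning_image=canny_image,
    strength=0.8,                   # re-run roughly 80% of the denoising trajectory
    num_inference_steps=30,
    controlnet_guidance_start=0.0,  # apply the ControlNet from the first kept step...
    controlnet_guidance_end=0.6,    # ...and drop it for the final 40% of the steps
).images[0]
result.save("controlnet_img2img.png")
```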
v0.24.0/stable_diffusion_controlnet_inpaint.py ADDED
@@ -0,0 +1,1139 @@
1
+ # Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
2
+
3
+ import inspect
4
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
5
+
6
+ import numpy as np
7
+ import PIL.Image
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
11
+
12
+ from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging
13
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
14
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
15
+ from diffusers.schedulers import KarrasDiffusionSchedulers
16
+ from diffusers.utils import (
17
+ PIL_INTERPOLATION,
18
+ is_accelerate_available,
19
+ is_accelerate_version,
20
+ replace_example_docstring,
21
+ )
22
+ from diffusers.utils.torch_utils import randn_tensor
23
+
24
+
25
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
26
+
27
+ EXAMPLE_DOC_STRING = """
28
+ Examples:
29
+ ```py
30
+ >>> import numpy as np
31
+ >>> import torch
32
+ >>> from PIL import Image
33
+ >>> from stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
34
+
35
+ >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
36
+ >>> from diffusers import ControlNetModel, UniPCMultistepScheduler
37
+ >>> from diffusers.utils import load_image
38
+
39
+ >>> def ade_palette():
40
+ return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
41
+ [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
42
+ [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
43
+ [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
44
+ [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
45
+ [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
46
+ [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
47
+ [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
48
+ [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
49
+ [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
50
+ [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
51
+ [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
52
+ [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
53
+ [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
54
+ [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
55
+ [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
56
+ [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
57
+ [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
58
+ [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
59
+ [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
60
+ [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
61
+ [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
62
+ [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
63
+ [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
64
+ [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
65
+ [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
66
+ [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
67
+ [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
68
+ [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
69
+ [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
70
+ [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
71
+ [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
72
+ [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
73
+ [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
74
+ [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
75
+ [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
76
+ [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
77
+ [102, 255, 0], [92, 0, 255]]
78
+
79
+ >>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
80
+ >>> image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
81
+
82
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16)
83
+
84
+ >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
85
+ "runwayml/stable-diffusion-inpainting", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
86
+ )
87
+
88
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
89
+ >>> pipe.enable_xformers_memory_efficient_attention()
90
+ >>> pipe.enable_model_cpu_offload()
91
+
92
+ >>> def image_to_seg(image):
93
+ pixel_values = image_processor(image, return_tensors="pt").pixel_values
94
+ with torch.no_grad():
95
+ outputs = image_segmentor(pixel_values)
96
+ seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
97
+ color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3
98
+ palette = np.array(ade_palette())
99
+ for label, color in enumerate(palette):
100
+ color_seg[seg == label, :] = color
101
+ color_seg = color_seg.astype(np.uint8)
102
+ seg_image = Image.fromarray(color_seg)
103
+ return seg_image
104
+
105
+ >>> image = load_image(
106
+ "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
107
+ )
108
+
109
+ >>> mask_image = load_image(
110
+ "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
111
+ )
112
+
113
+ >>> controlnet_conditioning_image = image_to_seg(image)
114
+
115
+ >>> image = pipe(
116
+ "Face of a yellow cat, high resolution, sitting on a park bench",
117
+ image,
118
+ mask_image,
119
+ controlnet_conditioning_image,
120
+ num_inference_steps=20,
121
+ ).images[0]
122
+
123
+ >>> image.save("out.png")
124
+ ```
125
+ """
126
+
127
+
128
+ def prepare_image(image):
129
+ if isinstance(image, torch.Tensor):
130
+ # Batch single image
131
+ if image.ndim == 3:
132
+ image = image.unsqueeze(0)
133
+
134
+ image = image.to(dtype=torch.float32)
135
+ else:
136
+ # preprocess image
137
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
138
+ image = [image]
139
+
140
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
141
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
142
+ image = np.concatenate(image, axis=0)
143
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
144
+ image = np.concatenate([i[None, :] for i in image], axis=0)
145
+
146
+ image = image.transpose(0, 3, 1, 2)
147
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
148
+
149
+ return image
150
+
151
+
152
+ def prepare_mask_image(mask_image):
153
+ if isinstance(mask_image, torch.Tensor):
154
+ if mask_image.ndim == 2:
155
+ # Batch and add channel dim for single mask
156
+ mask_image = mask_image.unsqueeze(0).unsqueeze(0)
157
+ elif mask_image.ndim == 3 and mask_image.shape[0] == 1:
158
+ # Single mask, the 0'th dimension is considered to be
159
+ # the existing batch size of 1
160
+ mask_image = mask_image.unsqueeze(0)
161
+ elif mask_image.ndim == 3 and mask_image.shape[0] != 1:
162
+ # Batch of mask, the 0'th dimension is considered to be
163
+ # the batching dimension
164
+ mask_image = mask_image.unsqueeze(1)
165
+
166
+ # Binarize mask
167
+ mask_image[mask_image < 0.5] = 0
168
+ mask_image[mask_image >= 0.5] = 1
169
+ else:
170
+ # preprocess mask
171
+ if isinstance(mask_image, (PIL.Image.Image, np.ndarray)):
172
+ mask_image = [mask_image]
173
+
174
+ if isinstance(mask_image, list) and isinstance(mask_image[0], PIL.Image.Image):
175
+ mask_image = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask_image], axis=0)
176
+ mask_image = mask_image.astype(np.float32) / 255.0
177
+ elif isinstance(mask_image, list) and isinstance(mask_image[0], np.ndarray):
178
+ mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0)
179
+
180
+ mask_image[mask_image < 0.5] = 0
181
+ mask_image[mask_image >= 0.5] = 1
182
+ mask_image = torch.from_numpy(mask_image)
183
+
184
+ return mask_image
185
+
186
+
187
+ def prepare_controlnet_conditioning_image(
188
+ controlnet_conditioning_image,
189
+ width,
190
+ height,
191
+ batch_size,
192
+ num_images_per_prompt,
193
+ device,
194
+ dtype,
195
+ do_classifier_free_guidance,
196
+ ):
197
+ if not isinstance(controlnet_conditioning_image, torch.Tensor):
198
+ if isinstance(controlnet_conditioning_image, PIL.Image.Image):
199
+ controlnet_conditioning_image = [controlnet_conditioning_image]
200
+
201
+ if isinstance(controlnet_conditioning_image[0], PIL.Image.Image):
202
+ controlnet_conditioning_image = [
203
+ np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :]
204
+ for i in controlnet_conditioning_image
205
+ ]
206
+ controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0)
207
+ controlnet_conditioning_image = np.array(controlnet_conditioning_image).astype(np.float32) / 255.0
208
+ controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2)
209
+ controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image)
210
+ elif isinstance(controlnet_conditioning_image[0], torch.Tensor):
211
+ controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0)
212
+
213
+ image_batch_size = controlnet_conditioning_image.shape[0]
214
+
215
+ if image_batch_size == 1:
216
+ repeat_by = batch_size
217
+ else:
218
+ # image batch size is the same as prompt batch size
219
+ repeat_by = num_images_per_prompt
220
+
221
+ controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0)
222
+
223
+ controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype)
224
+
225
+ if do_classifier_free_guidance:
226
+ controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)
227
+
228
+ return controlnet_conditioning_image
229
+
230
+
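+ # A rough sketch of what the helpers above return, assuming PIL inputs of matching size
+ # (shapes/ranges inferred from the code above, given as an illustration only):
+ #   prepare_image(pil_img)                      -> float32 tensor (B, 3, H, W) scaled to [-1, 1]
+ #   prepare_mask_image(pil_mask)                -> float32 tensor (B, 1, H, W) binarized to {0, 1}
+ #   prepare_controlnet_conditioning_image(...)  -> float tensor in [0, 1], duplicated when CFG is used
+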
231
+ class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
232
+ """
233
+ Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
234
+ """
235
+
236
+ _optional_components = ["safety_checker", "feature_extractor"]
237
+
238
+ def __init__(
239
+ self,
240
+ vae: AutoencoderKL,
241
+ text_encoder: CLIPTextModel,
242
+ tokenizer: CLIPTokenizer,
243
+ unet: UNet2DConditionModel,
244
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
245
+ scheduler: KarrasDiffusionSchedulers,
246
+ safety_checker: StableDiffusionSafetyChecker,
247
+ feature_extractor: CLIPImageProcessor,
248
+ requires_safety_checker: bool = True,
249
+ ):
250
+ super().__init__()
251
+
252
+ if safety_checker is None and requires_safety_checker:
253
+ logger.warning(
254
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
255
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
256
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
257
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
258
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
259
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
260
+ )
261
+
262
+ if safety_checker is not None and feature_extractor is None:
263
+ raise ValueError(
264
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
265
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
266
+ )
267
+
268
+ if isinstance(controlnet, (list, tuple)):
269
+ controlnet = MultiControlNetModel(controlnet)
270
+
271
+ self.register_modules(
272
+ vae=vae,
273
+ text_encoder=text_encoder,
274
+ tokenizer=tokenizer,
275
+ unet=unet,
276
+ controlnet=controlnet,
277
+ scheduler=scheduler,
278
+ safety_checker=safety_checker,
279
+ feature_extractor=feature_extractor,
280
+ )
281
+
282
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
283
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
284
+
285
+ def enable_vae_slicing(self):
286
+ r"""
287
+ Enable sliced VAE decoding.
288
+
289
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
290
+ steps. This is useful to save some memory and allow larger batch sizes.
291
+ """
292
+ self.vae.enable_slicing()
293
+
294
+ def disable_vae_slicing(self):
295
+ r"""
296
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
297
+ computing decoding in one step.
298
+ """
299
+ self.vae.disable_slicing()
300
+
301
+ def enable_sequential_cpu_offload(self, gpu_id=0):
302
+ r"""
303
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
304
+ text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a
305
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
306
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
307
+ `enable_model_cpu_offload`, but performance is lower.
308
+ """
309
+ if is_accelerate_available():
310
+ from accelerate import cpu_offload
311
+ else:
312
+ raise ImportError("Please install accelerate via `pip install accelerate`")
313
+
314
+ device = torch.device(f"cuda:{gpu_id}")
315
+
316
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:
317
+ cpu_offload(cpu_offloaded_model, device)
318
+
319
+ if self.safety_checker is not None:
320
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
321
+
322
+ def enable_model_cpu_offload(self, gpu_id=0):
323
+ r"""
324
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
325
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
326
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
327
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
328
+ """
329
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
330
+ from accelerate import cpu_offload_with_hook
331
+ else:
332
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
333
+
334
+ device = torch.device(f"cuda:{gpu_id}")
335
+
336
+ hook = None
337
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
338
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
339
+
340
+ if self.safety_checker is not None:
341
+ # the safety checker can offload the vae again
342
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
343
+
344
+ # the controlnet hook has to be manually offloaded as it alternates with the unet
345
+ cpu_offload_with_hook(self.controlnet, device)
346
+
347
+ # We'll offload the last model manually.
348
+ self.final_offload_hook = hook
349
+
350
+ @property
351
+ def _execution_device(self):
352
+ r"""
353
+ Returns the device on which the pipeline's models will be executed. After calling
354
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
355
+ hooks.
356
+ """
357
+ if not hasattr(self.unet, "_hf_hook"):
358
+ return self.device
359
+ for module in self.unet.modules():
360
+ if (
361
+ hasattr(module, "_hf_hook")
362
+ and hasattr(module._hf_hook, "execution_device")
363
+ and module._hf_hook.execution_device is not None
364
+ ):
365
+ return torch.device(module._hf_hook.execution_device)
366
+ return self.device
367
+
368
+ def _encode_prompt(
369
+ self,
370
+ prompt,
371
+ device,
372
+ num_images_per_prompt,
373
+ do_classifier_free_guidance,
374
+ negative_prompt=None,
375
+ prompt_embeds: Optional[torch.FloatTensor] = None,
376
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
377
+ ):
378
+ r"""
379
+ Encodes the prompt into text encoder hidden states.
380
+
381
+ Args:
382
+ prompt (`str` or `List[str]`, *optional*):
383
+ prompt to be encoded
384
+ device: (`torch.device`):
385
+ torch device
386
+ num_images_per_prompt (`int`):
387
+ number of images that should be generated per prompt
388
+ do_classifier_free_guidance (`bool`):
389
+ whether to use classifier free guidance or not
390
+ negative_prompt (`str` or `List[str]`, *optional*):
391
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead.
392
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
393
+ prompt_embeds (`torch.FloatTensor`, *optional*):
394
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
395
+ provided, text embeddings will be generated from `prompt` input argument.
396
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
397
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
398
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
399
+ argument.
400
+ """
401
+ if prompt is not None and isinstance(prompt, str):
402
+ batch_size = 1
403
+ elif prompt is not None and isinstance(prompt, list):
404
+ batch_size = len(prompt)
405
+ else:
406
+ batch_size = prompt_embeds.shape[0]
407
+
408
+ if prompt_embeds is None:
409
+ text_inputs = self.tokenizer(
410
+ prompt,
411
+ padding="max_length",
412
+ max_length=self.tokenizer.model_max_length,
413
+ truncation=True,
414
+ return_tensors="pt",
415
+ )
416
+ text_input_ids = text_inputs.input_ids
417
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
418
+
419
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
420
+ text_input_ids, untruncated_ids
421
+ ):
422
+ removed_text = self.tokenizer.batch_decode(
423
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
424
+ )
425
+ logger.warning(
426
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
427
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
428
+ )
429
+
430
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
431
+ attention_mask = text_inputs.attention_mask.to(device)
432
+ else:
433
+ attention_mask = None
434
+
435
+ prompt_embeds = self.text_encoder(
436
+ text_input_ids.to(device),
437
+ attention_mask=attention_mask,
438
+ )
439
+ prompt_embeds = prompt_embeds[0]
440
+
441
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
442
+
443
+ bs_embed, seq_len, _ = prompt_embeds.shape
444
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
445
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
446
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
447
+
448
+ # get unconditional embeddings for classifier free guidance
449
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
450
+ uncond_tokens: List[str]
451
+ if negative_prompt is None:
452
+ uncond_tokens = [""] * batch_size
453
+ elif type(prompt) is not type(negative_prompt):
454
+ raise TypeError(
455
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
456
+ f" {type(prompt)}."
457
+ )
458
+ elif isinstance(negative_prompt, str):
459
+ uncond_tokens = [negative_prompt]
460
+ elif batch_size != len(negative_prompt):
461
+ raise ValueError(
462
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
463
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
464
+ " the batch size of `prompt`."
465
+ )
466
+ else:
467
+ uncond_tokens = negative_prompt
468
+
469
+ max_length = prompt_embeds.shape[1]
470
+ uncond_input = self.tokenizer(
471
+ uncond_tokens,
472
+ padding="max_length",
473
+ max_length=max_length,
474
+ truncation=True,
475
+ return_tensors="pt",
476
+ )
477
+
478
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
479
+ attention_mask = uncond_input.attention_mask.to(device)
480
+ else:
481
+ attention_mask = None
482
+
483
+ negative_prompt_embeds = self.text_encoder(
484
+ uncond_input.input_ids.to(device),
485
+ attention_mask=attention_mask,
486
+ )
487
+ negative_prompt_embeds = negative_prompt_embeds[0]
488
+
489
+ if do_classifier_free_guidance:
490
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
491
+ seq_len = negative_prompt_embeds.shape[1]
492
+
493
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
494
+
495
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
496
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
497
+
498
+ # For classifier free guidance, we need to do two forward passes.
499
+ # Here we concatenate the unconditional and text embeddings into a single batch
500
+ # to avoid doing two forward passes
501
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
502
+
503
+ return prompt_embeds
504
+
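+ # Illustrative note: with classifier-free guidance the returned tensor stacks the negative and
+ # positive embeddings, e.g. batch_size=1, num_images_per_prompt=2 with a CLIP ViT-L text encoder
+ # (hidden size 768 assumed here) gives prompt_embeds of shape (4, 77, 768): first half
+ # unconditional, second half conditional.
+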
505
+ def run_safety_checker(self, image, device, dtype):
506
+ if self.safety_checker is not None:
507
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
508
+ image, has_nsfw_concept = self.safety_checker(
509
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
510
+ )
511
+ else:
512
+ has_nsfw_concept = None
513
+ return image, has_nsfw_concept
514
+
515
+ def decode_latents(self, latents):
516
+ latents = 1 / self.vae.config.scaling_factor * latents
517
+ image = self.vae.decode(latents).sample
518
+ image = (image / 2 + 0.5).clamp(0, 1)
519
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
520
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
521
+ return image
522
+
523
+ def prepare_extra_step_kwargs(self, generator, eta):
524
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
525
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
526
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
527
+ # and should be between [0, 1]
528
+
529
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
530
+ extra_step_kwargs = {}
531
+ if accepts_eta:
532
+ extra_step_kwargs["eta"] = eta
533
+
534
+ # check if the scheduler accepts generator
535
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
536
+ if accepts_generator:
537
+ extra_step_kwargs["generator"] = generator
538
+ return extra_step_kwargs
539
+
540
+ def check_controlnet_conditioning_image(self, image, prompt, prompt_embeds):
541
+ image_is_pil = isinstance(image, PIL.Image.Image)
542
+ image_is_tensor = isinstance(image, torch.Tensor)
543
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
544
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
545
+
546
+ if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:
547
+ raise TypeError(
548
+ "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
549
+ )
550
+
551
+ if image_is_pil:
552
+ image_batch_size = 1
553
+ elif image_is_tensor:
554
+ image_batch_size = image.shape[0]
555
+ elif image_is_pil_list:
556
+ image_batch_size = len(image)
557
+ elif image_is_tensor_list:
558
+ image_batch_size = len(image)
559
+ else:
560
+ raise ValueError("controlnet condition image is not valid")
561
+
562
+ if prompt is not None and isinstance(prompt, str):
563
+ prompt_batch_size = 1
564
+ elif prompt is not None and isinstance(prompt, list):
565
+ prompt_batch_size = len(prompt)
566
+ elif prompt_embeds is not None:
567
+ prompt_batch_size = prompt_embeds.shape[0]
568
+ else:
569
+ raise ValueError("prompt or prompt_embeds are not valid")
570
+
571
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
572
+ raise ValueError(
573
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
574
+ )
575
+
576
+ def check_inputs(
577
+ self,
578
+ prompt,
579
+ image,
580
+ mask_image,
581
+ controlnet_conditioning_image,
582
+ height,
583
+ width,
584
+ callback_steps,
585
+ negative_prompt=None,
586
+ prompt_embeds=None,
587
+ negative_prompt_embeds=None,
588
+ controlnet_conditioning_scale=None,
589
+ ):
590
+ if height % 8 != 0 or width % 8 != 0:
591
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
592
+
593
+ if (callback_steps is None) or (
594
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
595
+ ):
596
+ raise ValueError(
597
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
598
+ f" {type(callback_steps)}."
599
+ )
600
+
601
+ if prompt is not None and prompt_embeds is not None:
602
+ raise ValueError(
603
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
604
+ " only forward one of the two."
605
+ )
606
+ elif prompt is None and prompt_embeds is None:
607
+ raise ValueError(
608
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
609
+ )
610
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
611
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
612
+
613
+ if negative_prompt is not None and negative_prompt_embeds is not None:
614
+ raise ValueError(
615
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
616
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
617
+ )
618
+
619
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
620
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
621
+ raise ValueError(
622
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
623
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
624
+ f" {negative_prompt_embeds.shape}."
625
+ )
626
+
627
+ # check controlnet condition image
628
+ if isinstance(self.controlnet, ControlNetModel):
629
+ self.check_controlnet_conditioning_image(controlnet_conditioning_image, prompt, prompt_embeds)
630
+ elif isinstance(self.controlnet, MultiControlNetModel):
631
+ if not isinstance(controlnet_conditioning_image, list):
632
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
633
+ if len(controlnet_conditioning_image) != len(self.controlnet.nets):
634
+ raise ValueError(
635
+ "For multiple controlnets: `image` must have the same length as the number of controlnets."
636
+ )
637
+ for image_ in controlnet_conditioning_image:
638
+ self.check_controlnet_conditioning_image(image_, prompt, prompt_embeds)
639
+ else:
640
+ assert False
641
+
642
+ # Check `controlnet_conditioning_scale`
643
+ if isinstance(self.controlnet, ControlNetModel):
644
+ if not isinstance(controlnet_conditioning_scale, float):
645
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
646
+ elif isinstance(self.controlnet, MultiControlNetModel):
647
+ if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
648
+ self.controlnet.nets
649
+ ):
650
+ raise ValueError(
651
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
652
+ " the same length as the number of controlnets"
653
+ )
654
+ else:
655
+ assert False
656
+
657
+ if isinstance(image, torch.Tensor) and not isinstance(mask_image, torch.Tensor):
658
+ raise TypeError("if `image` is a tensor, `mask_image` must also be a tensor")
659
+
660
+ if isinstance(image, PIL.Image.Image) and not isinstance(mask_image, PIL.Image.Image):
661
+ raise TypeError("if `image` is a PIL image, `mask_image` must also be a PIL image")
662
+
663
+ if isinstance(image, torch.Tensor):
664
+ if image.ndim != 3 and image.ndim != 4:
665
+ raise ValueError("`image` must have 3 or 4 dimensions")
666
+
667
+ if mask_image.ndim != 2 and mask_image.ndim != 3 and mask_image.ndim != 4:
668
+ raise ValueError("`mask_image` must have 2, 3, or 4 dimensions")
669
+
670
+ if image.ndim == 3:
671
+ image_batch_size = 1
672
+ image_channels, image_height, image_width = image.shape
673
+ elif image.ndim == 4:
674
+ image_batch_size, image_channels, image_height, image_width = image.shape
675
+ else:
676
+ assert False
677
+
678
+ if mask_image.ndim == 2:
679
+ mask_image_batch_size = 1
680
+ mask_image_channels = 1
681
+ mask_image_height, mask_image_width = mask_image.shape
682
+ elif mask_image.ndim == 3:
683
+ mask_image_channels = 1
684
+ mask_image_batch_size, mask_image_height, mask_image_width = mask_image.shape
685
+ elif mask_image.ndim == 4:
686
+ mask_image_batch_size, mask_image_channels, mask_image_height, mask_image_width = mask_image.shape
687
+
688
+ if image_channels != 3:
689
+ raise ValueError("`image` must have 3 channels")
690
+
691
+ if mask_image_channels != 1:
692
+ raise ValueError("`mask_image` must have 1 channel")
693
+
694
+ if image_batch_size != mask_image_batch_size:
695
+ raise ValueError("`image` and `mask_image` mush have the same batch sizes")
696
+
697
+ if image_height != mask_image_height or image_width != mask_image_width:
698
+ raise ValueError("`image` and `mask_image` must have the same height and width dimensions")
699
+
700
+ if image.min() < -1 or image.max() > 1:
701
+ raise ValueError("`image` should be in range [-1, 1]")
702
+
703
+ if mask_image.min() < 0 or mask_image.max() > 1:
704
+ raise ValueError("`mask_image` should be in range [0, 1]")
705
+ else:
706
+ mask_image_channels = 1
707
+ image_channels = 3
708
+
709
+ single_image_latent_channels = self.vae.config.latent_channels
710
+
711
+ total_latent_channels = single_image_latent_channels * 2 + mask_image_channels
712
+
713
+ if total_latent_channels != self.unet.config.in_channels:
714
+ raise ValueError(
715
+ f"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received"
716
+ f" non inpainting latent channels: {single_image_latent_channels},"
717
+ f" mask channels: {mask_image_channels}, and masked image channels: {single_image_latent_channels}."
718
+ f" Please verify the config of `pipeline.unet` and the `mask_image` and `image` inputs."
719
+ )
720
+
721
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
722
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
723
+ if isinstance(generator, list) and len(generator) != batch_size:
724
+ raise ValueError(
725
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
726
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
727
+ )
728
+
729
+ if latents is None:
730
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
731
+ else:
732
+ latents = latents.to(device)
733
+
734
+ # scale the initial noise by the standard deviation required by the scheduler
735
+ latents = latents * self.scheduler.init_noise_sigma
736
+
737
+ return latents
738
+
739
+ def prepare_mask_latents(self, mask_image, batch_size, height, width, dtype, device, do_classifier_free_guidance):
740
+ # resize the mask to latents shape as we concatenate the mask to the latents
741
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
742
+ # and half precision
743
+ mask_image = F.interpolate(mask_image, size=(height // self.vae_scale_factor, width // self.vae_scale_factor))
744
+ mask_image = mask_image.to(device=device, dtype=dtype)
745
+
746
+ # duplicate mask for each generation per prompt, using mps friendly method
747
+ if mask_image.shape[0] < batch_size:
748
+ if not batch_size % mask_image.shape[0] == 0:
749
+ raise ValueError(
750
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
751
+ f" a total batch size of {batch_size}, but {mask_image.shape[0]} masks were passed. Make sure the number"
752
+ " of masks that you pass is divisible by the total requested batch size."
753
+ )
754
+ mask_image = mask_image.repeat(batch_size // mask_image.shape[0], 1, 1, 1)
755
+
756
+ mask_image = torch.cat([mask_image] * 2) if do_classifier_free_guidance else mask_image
757
+
758
+ mask_image_latents = mask_image
759
+
760
+ return mask_image_latents
761
+
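+ # Note: the mask is never VAE-encoded, only resized to the latent grid; e.g. a 512x512 mask
+ # becomes (B, 1, 64, 64) when vae_scale_factor == 8 (illustrative numbers).
+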
762
+ def prepare_masked_image_latents(
763
+ self, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
764
+ ):
765
+ masked_image = masked_image.to(device=device, dtype=dtype)
766
+
767
+ # encode the mask image into latents space so we can concatenate it to the latents
768
+ if isinstance(generator, list):
769
+ masked_image_latents = [
770
+ self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
771
+ for i in range(batch_size)
772
+ ]
773
+ masked_image_latents = torch.cat(masked_image_latents, dim=0)
774
+ else:
775
+ masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
776
+ masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
777
+
778
+ # duplicate masked_image_latents for each generation per prompt, using mps friendly method
779
+ if masked_image_latents.shape[0] < batch_size:
780
+ if not batch_size % masked_image_latents.shape[0] == 0:
781
+ raise ValueError(
782
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
783
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
784
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
785
+ )
786
+ masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
787
+
788
+ masked_image_latents = (
789
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
790
+ )
791
+
792
+ # aligning device to prevent device errors when concating it with the latent model input
793
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
794
+ return masked_image_latents
795
+
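+ # Sketch of the UNet input assembled later in __call__, assuming the standard 9-channel
+ # inpainting UNet: 4 noise-latent channels + 1 mask channel + 4 masked-image latent channels,
+ # concatenated along dim=1; this is the sum that check_inputs compares against
+ # self.unet.config.in_channels.
+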
796
+ def _default_height_width(self, height, width, image):
797
+ if isinstance(image, list):
798
+ image = image[0]
799
+
800
+ if height is None:
801
+ if isinstance(image, PIL.Image.Image):
802
+ height = image.height
803
+ elif isinstance(image, torch.Tensor):
804
+ height = image.shape[2]
805
+
806
+ height = (height // 8) * 8 # round down to nearest multiple of 8
807
+
808
+ if width is None:
809
+ if isinstance(image, PIL.Image.Image):
810
+ width = image.width
811
+ elif isinstance(image, torch.Tensor):
812
+ width = image.shape[3]
813
+
814
+ width = (width // 8) * 8 # round down to nearest multiple of 8
815
+
816
+ return height, width
817
+
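+ # Worked example of the rounding above: a 513x769 conditioning image yields
+ # height, width = 512, 768, keeping both divisible by the VAE downsampling factor of 8.
+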
818
+ @torch.no_grad()
819
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
820
+ def __call__(
821
+ self,
822
+ prompt: Union[str, List[str]] = None,
823
+ image: Union[torch.Tensor, PIL.Image.Image] = None,
824
+ mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
825
+ controlnet_conditioning_image: Union[
826
+ torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]
827
+ ] = None,
828
+ height: Optional[int] = None,
829
+ width: Optional[int] = None,
830
+ num_inference_steps: int = 50,
831
+ guidance_scale: float = 7.5,
832
+ negative_prompt: Optional[Union[str, List[str]]] = None,
833
+ num_images_per_prompt: Optional[int] = 1,
834
+ eta: float = 0.0,
835
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
836
+ latents: Optional[torch.FloatTensor] = None,
837
+ prompt_embeds: Optional[torch.FloatTensor] = None,
838
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
839
+ output_type: Optional[str] = "pil",
840
+ return_dict: bool = True,
841
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
842
+ callback_steps: int = 1,
843
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
844
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
845
+ ):
846
+ r"""
847
+ Function invoked when calling the pipeline for generation.
848
+
849
+ Args:
850
+ prompt (`str` or `List[str]`, *optional*):
851
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
852
+ instead.
853
+ image (`torch.Tensor` or `PIL.Image.Image`):
854
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
855
+ be masked out with `mask_image` and repainted according to `prompt`.
856
+ mask_image (`torch.Tensor` or `PIL.Image.Image`):
857
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
858
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
859
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
860
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
861
+ controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
862
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance to the UNet. If
863
+ the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
864
+ also be accepted as an image. The control image is automatically resized to fit the output image.
865
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
866
+ The height in pixels of the generated image.
867
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
868
+ The width in pixels of the generated image.
869
+ num_inference_steps (`int`, *optional*, defaults to 50):
870
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
871
+ expense of slower inference.
872
+ guidance_scale (`float`, *optional*, defaults to 7.5):
873
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
874
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
875
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
876
+ 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
877
+ usually at the expense of lower image quality.
878
+ negative_prompt (`str` or `List[str]`, *optional*):
879
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead.
880
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
881
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
882
+ The number of images to generate per prompt.
883
+ eta (`float`, *optional*, defaults to 0.0):
884
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
885
+ [`schedulers.DDIMScheduler`], will be ignored for others.
886
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
887
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
888
+ to make generation deterministic.
889
+ latents (`torch.FloatTensor`, *optional*):
890
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
891
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
892
+ tensor will be generated by sampling using the supplied random `generator`.
893
+ prompt_embeds (`torch.FloatTensor`, *optional*):
894
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
895
+ provided, text embeddings will be generated from `prompt` input argument.
896
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
897
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
898
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
899
+ argument.
900
+ output_type (`str`, *optional*, defaults to `"pil"`):
901
+ The output format of the generated image. Choose between
902
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
903
+ return_dict (`bool`, *optional*, defaults to `True`):
904
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
905
+ plain tuple.
906
+ callback (`Callable`, *optional*):
907
+ A function that will be called every `callback_steps` steps during inference. The function will be
908
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
909
+ callback_steps (`int`, *optional*, defaults to 1):
910
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
911
+ called at every step.
912
+ cross_attention_kwargs (`dict`, *optional*):
913
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
914
+ `self.processor` in
915
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
916
+ controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
917
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
918
+ to the residual in the original unet.
919
+
920
+ Examples:
921
+
922
+ Returns:
923
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
924
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
925
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
926
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
927
+ (nsfw) content, according to the `safety_checker`.
928
+ """
929
+ # 0. Default height and width to unet
930
+ height, width = self._default_height_width(height, width, controlnet_conditioning_image)
931
+
932
+ # 1. Check inputs. Raise error if not correct
933
+ self.check_inputs(
934
+ prompt,
935
+ image,
936
+ mask_image,
937
+ controlnet_conditioning_image,
938
+ height,
939
+ width,
940
+ callback_steps,
941
+ negative_prompt,
942
+ prompt_embeds,
943
+ negative_prompt_embeds,
944
+ controlnet_conditioning_scale,
945
+ )
946
+
947
+ # 2. Define call parameters
948
+ if prompt is not None and isinstance(prompt, str):
949
+ batch_size = 1
950
+ elif prompt is not None and isinstance(prompt, list):
951
+ batch_size = len(prompt)
952
+ else:
953
+ batch_size = prompt_embeds.shape[0]
954
+
955
+ device = self._execution_device
956
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
957
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
958
+ # corresponds to doing no classifier free guidance.
959
+ do_classifier_free_guidance = guidance_scale > 1.0
960
+
961
+ if isinstance(self.controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
962
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets)
963
+
964
+ # 3. Encode input prompt
965
+ prompt_embeds = self._encode_prompt(
966
+ prompt,
967
+ device,
968
+ num_images_per_prompt,
969
+ do_classifier_free_guidance,
970
+ negative_prompt,
971
+ prompt_embeds=prompt_embeds,
972
+ negative_prompt_embeds=negative_prompt_embeds,
973
+ )
974
+
975
+ # 4. Prepare mask, image, and controlnet_conditioning_image
976
+ image = prepare_image(image)
977
+
978
+ mask_image = prepare_mask_image(mask_image)
979
+
980
+ # condition image(s)
981
+ if isinstance(self.controlnet, ControlNetModel):
982
+ controlnet_conditioning_image = prepare_controlnet_conditioning_image(
983
+ controlnet_conditioning_image=controlnet_conditioning_image,
984
+ width=width,
985
+ height=height,
986
+ batch_size=batch_size * num_images_per_prompt,
987
+ num_images_per_prompt=num_images_per_prompt,
988
+ device=device,
989
+ dtype=self.controlnet.dtype,
990
+ do_classifier_free_guidance=do_classifier_free_guidance,
991
+ )
992
+ elif isinstance(self.controlnet, MultiControlNetModel):
993
+ controlnet_conditioning_images = []
994
+
995
+ for image_ in controlnet_conditioning_image:
996
+ image_ = prepare_controlnet_conditioning_image(
997
+ controlnet_conditioning_image=image_,
998
+ width=width,
999
+ height=height,
1000
+ batch_size=batch_size * num_images_per_prompt,
1001
+ num_images_per_prompt=num_images_per_prompt,
1002
+ device=device,
1003
+ dtype=self.controlnet.dtype,
1004
+ do_classifier_free_guidance=do_classifier_free_guidance,
1005
+ )
1006
+ controlnet_conditioning_images.append(image_)
1007
+
1008
+ controlnet_conditioning_image = controlnet_conditioning_images
1009
+ else:
1010
+ assert False
1011
+
1012
+ masked_image = image * (mask_image < 0.5)
1013
+
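+ # mask_image is 1 where pixels should be repainted, so (mask_image < 0.5) keeps only the
+ # preserved region and zeroes out the area to repaint (mid-gray in the [-1, 1] range) before
+ # it is VAE-encoded as the masked-image conditioning.
+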
1014
+ # 5. Prepare timesteps
1015
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1016
+ timesteps = self.scheduler.timesteps
1017
+
1018
+ # 6. Prepare latent variables
1019
+ num_channels_latents = self.vae.config.latent_channels
1020
+ latents = self.prepare_latents(
1021
+ batch_size * num_images_per_prompt,
1022
+ num_channels_latents,
1023
+ height,
1024
+ width,
1025
+ prompt_embeds.dtype,
1026
+ device,
1027
+ generator,
1028
+ latents,
1029
+ )
1030
+
1031
+ mask_image_latents = self.prepare_mask_latents(
1032
+ mask_image,
1033
+ batch_size * num_images_per_prompt,
1034
+ height,
1035
+ width,
1036
+ prompt_embeds.dtype,
1037
+ device,
1038
+ do_classifier_free_guidance,
1039
+ )
1040
+
1041
+ masked_image_latents = self.prepare_masked_image_latents(
1042
+ masked_image,
1043
+ batch_size * num_images_per_prompt,
1044
+ height,
1045
+ width,
1046
+ prompt_embeds.dtype,
1047
+ device,
1048
+ generator,
1049
+ do_classifier_free_guidance,
1050
+ )
1051
+
1052
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1053
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1054
+
1055
+ # 8. Denoising loop
1056
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1057
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1058
+ for i, t in enumerate(timesteps):
1059
+ # expand the latents if we are doing classifier free guidance
1060
+ non_inpainting_latent_model_input = (
1061
+ torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1062
+ )
1063
+
1064
+ non_inpainting_latent_model_input = self.scheduler.scale_model_input(
1065
+ non_inpainting_latent_model_input, t
1066
+ )
1067
+
1068
+ inpainting_latent_model_input = torch.cat(
1069
+ [non_inpainting_latent_model_input, mask_image_latents, masked_image_latents], dim=1
1070
+ )
1071
+
1072
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1073
+ non_inpainting_latent_model_input,
1074
+ t,
1075
+ encoder_hidden_states=prompt_embeds,
1076
+ controlnet_cond=controlnet_conditioning_image,
1077
+ conditioning_scale=controlnet_conditioning_scale,
1078
+ return_dict=False,
1079
+ )
1080
+
1081
+ # predict the noise residual
1082
+ noise_pred = self.unet(
1083
+ inpainting_latent_model_input,
1084
+ t,
1085
+ encoder_hidden_states=prompt_embeds,
1086
+ cross_attention_kwargs=cross_attention_kwargs,
1087
+ down_block_additional_residuals=down_block_res_samples,
1088
+ mid_block_additional_residual=mid_block_res_sample,
1089
+ ).sample
1090
+
1091
+ # perform guidance
1092
+ if do_classifier_free_guidance:
1093
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1094
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1095
+
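+ # i.e. extrapolate from the unconditional prediction toward the text-conditioned one;
+ # with guidance_scale = 7.5 the difference is weighted 7.5x (a rough intuition only).
+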
1096
+ # compute the previous noisy sample x_t -> x_t-1
1097
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1098
+
1099
+ # call the callback, if provided
1100
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1101
+ progress_bar.update()
1102
+ if callback is not None and i % callback_steps == 0:
1103
+ step_idx = i // getattr(self.scheduler, "order", 1)
1104
+ callback(step_idx, t, latents)
1105
+
1106
+ # If we do sequential model offloading, let's offload unet and controlnet
1107
+ # manually for max memory savings
1108
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1109
+ self.unet.to("cpu")
1110
+ self.controlnet.to("cpu")
1111
+ torch.cuda.empty_cache()
1112
+
1113
+ if output_type == "latent":
1114
+ image = latents
1115
+ has_nsfw_concept = None
1116
+ elif output_type == "pil":
1117
+ # 8. Post-processing
1118
+ image = self.decode_latents(latents)
1119
+
1120
+ # 9. Run safety checker
1121
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1122
+
1123
+ # 10. Convert to PIL
1124
+ image = self.numpy_to_pil(image)
1125
+ else:
1126
+ # 8. Post-processing
1127
+ image = self.decode_latents(latents)
1128
+
1129
+ # 9. Run safety checker
1130
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1131
+
1132
+ # Offload last model to CPU
1133
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1134
+ self.final_offload_hook.offload()
1135
+
1136
+ if not return_dict:
1137
+ return (image, has_nsfw_concept)
1138
+
1139
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
v0.24.0/stable_diffusion_controlnet_inpaint_img2img.py ADDED
@@ -0,0 +1,1120 @@
1
+ # Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
2
+
3
+ import inspect
4
+ from typing import Any, Callable, Dict, List, Optional, Union
5
+
6
+ import numpy as np
7
+ import PIL.Image
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
11
+
12
+ from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
14
+ from diffusers.schedulers import KarrasDiffusionSchedulers
15
+ from diffusers.utils import (
16
+ PIL_INTERPOLATION,
17
+ is_accelerate_available,
18
+ is_accelerate_version,
19
+ replace_example_docstring,
20
+ )
21
+ from diffusers.utils.torch_utils import randn_tensor
22
+
23
+
24
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
25
+
26
+ EXAMPLE_DOC_STRING = """
27
+ Examples:
28
+ ```py
29
+ >>> import numpy as np
30
+ >>> import torch
31
+ >>> from PIL import Image
32
+ >>> from stable_diffusion_controlnet_inpaint_img2img import StableDiffusionControlNetInpaintImg2ImgPipeline
33
+
34
+ >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
35
+ >>> from diffusers import ControlNetModel, UniPCMultistepScheduler
36
+ >>> from diffusers.utils import load_image
37
+
38
+ >>> def ade_palette():
39
+ return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
40
+ [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
41
+ [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
42
+ [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
43
+ [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
44
+ [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
45
+ [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
46
+ [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
47
+ [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
48
+ [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
49
+ [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
50
+ [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
51
+ [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
52
+ [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
53
+ [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
54
+ [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
55
+ [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
56
+ [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
57
+ [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
58
+ [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
59
+ [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
60
+ [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
61
+ [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
62
+ [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
63
+ [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
64
+ [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
65
+ [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
66
+ [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
67
+ [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
68
+ [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
69
+ [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
70
+ [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
71
+ [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
72
+ [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
73
+ [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
74
+ [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
75
+ [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
76
+ [102, 255, 0], [92, 0, 255]]
77
+
78
+ >>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
79
+ >>> image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
80
+
81
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16)
82
+
83
+ >>> pipe = StableDiffusionControlNetInpaintImg2ImgPipeline.from_pretrained(
84
+ "runwayml/stable-diffusion-inpainting", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
85
+ )
86
+
87
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
88
+ >>> pipe.enable_xformers_memory_efficient_attention()
89
+ >>> pipe.enable_model_cpu_offload()
90
+
91
+ >>> def image_to_seg(image):
92
+ pixel_values = image_processor(image, return_tensors="pt").pixel_values
93
+ with torch.no_grad():
94
+ outputs = image_segmentor(pixel_values)
95
+ seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
96
+ color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3
97
+ palette = np.array(ade_palette())
98
+ for label, color in enumerate(palette):
99
+ color_seg[seg == label, :] = color
100
+ color_seg = color_seg.astype(np.uint8)
101
+ seg_image = Image.fromarray(color_seg)
102
+ return seg_image
103
+
104
+ >>> image = load_image(
105
+ "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
106
+ )
107
+
108
+ >>> mask_image = load_image(
109
+ "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
110
+ )
111
+
112
+ >>> controlnet_conditioning_image = image_to_seg(image)
113
+
114
+ >>> image = pipe(
115
+ "Face of a yellow cat, high resolution, sitting on a park bench",
116
+ image,
117
+ mask_image,
118
+ controlnet_conditioning_image,
119
+ num_inference_steps=20,
120
+ ).images[0]
121
+
122
+ >>> image.save("out.png")
123
+ ```
124
+ """
125
+
126
+
127
+ def prepare_image(image):
128
+ if isinstance(image, torch.Tensor):
129
+ # Batch single image
130
+ if image.ndim == 3:
131
+ image = image.unsqueeze(0)
132
+
133
+ image = image.to(dtype=torch.float32)
134
+ else:
135
+ # preprocess image
136
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
137
+ image = [image]
138
+
139
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
140
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
141
+ image = np.concatenate(image, axis=0)
142
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
143
+ image = np.concatenate([i[None, :] for i in image], axis=0)
144
+
145
+ image = image.transpose(0, 3, 1, 2)
146
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
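+ # map uint8 pixel values from [0, 255] to [-1, 1], the range the VAE encoder expects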
147
+
148
+ return image
149
+
150
+
151
+ def prepare_mask_image(mask_image):
152
+ if isinstance(mask_image, torch.Tensor):
153
+ if mask_image.ndim == 2:
154
+ # Batch and add channel dim for single mask
155
+ mask_image = mask_image.unsqueeze(0).unsqueeze(0)
156
+ elif mask_image.ndim == 3 and mask_image.shape[0] == 1:
157
+ # Single mask, the 0'th dimension is considered to be
158
+ # the existing batch size of 1
159
+ mask_image = mask_image.unsqueeze(0)
160
+ elif mask_image.ndim == 3 and mask_image.shape[0] != 1:
161
+ # Batch of mask, the 0'th dimension is considered to be
162
+ # the batching dimension
163
+ mask_image = mask_image.unsqueeze(1)
164
+
165
+ # Binarize mask
166
+ mask_image[mask_image < 0.5] = 0
167
+ mask_image[mask_image >= 0.5] = 1
168
+ else:
169
+ # preprocess mask
170
+ if isinstance(mask_image, (PIL.Image.Image, np.ndarray)):
171
+ mask_image = [mask_image]
172
+
173
+ if isinstance(mask_image, list) and isinstance(mask_image[0], PIL.Image.Image):
174
+ mask_image = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask_image], axis=0)
175
+ mask_image = mask_image.astype(np.float32) / 255.0
176
+ elif isinstance(mask_image, list) and isinstance(mask_image[0], np.ndarray):
177
+ mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0)
178
+
179
+ mask_image[mask_image < 0.5] = 0
180
+ mask_image[mask_image >= 0.5] = 1
181
+ mask_image = torch.from_numpy(mask_image)
182
+
183
+ return mask_image
184
+
185
+
186
+ def prepare_controlnet_conditioning_image(
187
+ controlnet_conditioning_image, width, height, batch_size, num_images_per_prompt, device, dtype
188
+ ):
189
+ if not isinstance(controlnet_conditioning_image, torch.Tensor):
190
+ if isinstance(controlnet_conditioning_image, PIL.Image.Image):
191
+ controlnet_conditioning_image = [controlnet_conditioning_image]
192
+
193
+ if isinstance(controlnet_conditioning_image[0], PIL.Image.Image):
194
+ controlnet_conditioning_image = [
195
+ np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :]
196
+ for i in controlnet_conditioning_image
197
+ ]
198
+ controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0)
199
+ controlnet_conditioning_image = np.array(controlnet_conditioning_image).astype(np.float32) / 255.0
200
+ controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2)
201
+ controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image)
202
+ elif isinstance(controlnet_conditioning_image[0], torch.Tensor):
203
+ controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0)
204
+
205
+ image_batch_size = controlnet_conditioning_image.shape[0]
206
+
207
+ if image_batch_size == 1:
208
+ repeat_by = batch_size
209
+ else:
210
+ # image batch size is the same as prompt batch size
211
+ repeat_by = num_images_per_prompt
212
+
213
+ controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0)
214
+
215
+ controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype)
216
+
217
+ return controlnet_conditioning_image
218
+
219
+
220
+ class StableDiffusionControlNetInpaintImg2ImgPipeline(DiffusionPipeline):
221
+ """
222
+ Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
223
+ """
224
+
225
+ _optional_components = ["safety_checker", "feature_extractor"]
226
+
227
+ def __init__(
228
+ self,
229
+ vae: AutoencoderKL,
230
+ text_encoder: CLIPTextModel,
231
+ tokenizer: CLIPTokenizer,
232
+ unet: UNet2DConditionModel,
233
+ controlnet: ControlNetModel,
234
+ scheduler: KarrasDiffusionSchedulers,
235
+ safety_checker: StableDiffusionSafetyChecker,
236
+ feature_extractor: CLIPImageProcessor,
237
+ requires_safety_checker: bool = True,
238
+ ):
239
+ super().__init__()
240
+
241
+ if safety_checker is None and requires_safety_checker:
242
+ logger.warning(
243
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
244
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
245
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
246
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
247
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
248
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
249
+ )
250
+
251
+ if safety_checker is not None and feature_extractor is None:
252
+ raise ValueError(
253
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
254
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
255
+ )
256
+
257
+ self.register_modules(
258
+ vae=vae,
259
+ text_encoder=text_encoder,
260
+ tokenizer=tokenizer,
261
+ unet=unet,
262
+ controlnet=controlnet,
263
+ scheduler=scheduler,
264
+ safety_checker=safety_checker,
265
+ feature_extractor=feature_extractor,
266
+ )
267
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
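+ # with the usual 4 entries in block_out_channels this is 2 ** 3 = 8, i.e. latents are 8x smaller than the pixel image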
268
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
269
+
270
+ def enable_vae_slicing(self):
271
+ r"""
272
+ Enable sliced VAE decoding.
273
+
274
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
275
+ steps. This is useful to save some memory and allow larger batch sizes.
276
+ """
277
+ self.vae.enable_slicing()
278
+
279
+ def disable_vae_slicing(self):
280
+ r"""
281
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
282
+ computing decoding in one step.
283
+ """
284
+ self.vae.disable_slicing()
285
+
286
+ def enable_sequential_cpu_offload(self, gpu_id=0):
287
+ r"""
288
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
289
+ text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a
290
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
291
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
292
+ `enable_model_cpu_offload`, but performance is lower.
293
+ """
294
+ if is_accelerate_available():
295
+ from accelerate import cpu_offload
296
+ else:
297
+ raise ImportError("Please install accelerate via `pip install accelerate`")
298
+
299
+ device = torch.device(f"cuda:{gpu_id}")
300
+
301
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:
302
+ cpu_offload(cpu_offloaded_model, device)
303
+
304
+ if self.safety_checker is not None:
305
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
306
+
307
+ def enable_model_cpu_offload(self, gpu_id=0):
308
+ r"""
309
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
310
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
311
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
312
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
313
+ """
314
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
315
+ from accelerate import cpu_offload_with_hook
316
+ else:
317
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
318
+
319
+ device = torch.device(f"cuda:{gpu_id}")
320
+
321
+ hook = None
322
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
323
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
324
+
325
+ if self.safety_checker is not None:
326
+ # the safety checker can offload the vae again
327
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
328
+
329
+ # the controlnet hook has to be offloaded manually because it alternates with the unet
330
+ cpu_offload_with_hook(self.controlnet, device)
331
+
332
+ # We'll offload the last model manually.
333
+ self.final_offload_hook = hook
334
+
335
+ @property
336
+ def _execution_device(self):
337
+ r"""
338
+ Returns the device on which the pipeline's models will be executed. After calling
339
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
340
+ hooks.
341
+ """
342
+ if not hasattr(self.unet, "_hf_hook"):
343
+ return self.device
344
+ for module in self.unet.modules():
345
+ if (
346
+ hasattr(module, "_hf_hook")
347
+ and hasattr(module._hf_hook, "execution_device")
348
+ and module._hf_hook.execution_device is not None
349
+ ):
350
+ return torch.device(module._hf_hook.execution_device)
351
+ return self.device
352
+
353
+ def _encode_prompt(
354
+ self,
355
+ prompt,
356
+ device,
357
+ num_images_per_prompt,
358
+ do_classifier_free_guidance,
359
+ negative_prompt=None,
360
+ prompt_embeds: Optional[torch.FloatTensor] = None,
361
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
362
+ ):
363
+ r"""
364
+ Encodes the prompt into text encoder hidden states.
365
+
366
+ Args:
367
+ prompt (`str` or `List[str]`, *optional*):
368
+ prompt to be encoded
369
+ device: (`torch.device`):
370
+ torch device
371
+ num_images_per_prompt (`int`):
372
+ number of images that should be generated per prompt
373
+ do_classifier_free_guidance (`bool`):
374
+ whether to use classifier free guidance or not
375
+ negative_prompt (`str` or `List[str]`, *optional*):
376
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead.
377
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
378
+ prompt_embeds (`torch.FloatTensor`, *optional*):
379
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
380
+ provided, text embeddings will be generated from `prompt` input argument.
381
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
382
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
383
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
384
+ argument.
385
+ """
386
+ if prompt is not None and isinstance(prompt, str):
387
+ batch_size = 1
388
+ elif prompt is not None and isinstance(prompt, list):
389
+ batch_size = len(prompt)
390
+ else:
391
+ batch_size = prompt_embeds.shape[0]
392
+
393
+ if prompt_embeds is None:
394
+ text_inputs = self.tokenizer(
395
+ prompt,
396
+ padding="max_length",
397
+ max_length=self.tokenizer.model_max_length,
398
+ truncation=True,
399
+ return_tensors="pt",
400
+ )
401
+ text_input_ids = text_inputs.input_ids
402
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
403
+
404
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
405
+ text_input_ids, untruncated_ids
406
+ ):
407
+ removed_text = self.tokenizer.batch_decode(
408
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
409
+ )
410
+ logger.warning(
411
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
412
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
413
+ )
414
+
415
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
416
+ attention_mask = text_inputs.attention_mask.to(device)
417
+ else:
418
+ attention_mask = None
419
+
420
+ prompt_embeds = self.text_encoder(
421
+ text_input_ids.to(device),
422
+ attention_mask=attention_mask,
423
+ )
424
+ prompt_embeds = prompt_embeds[0]
425
+
426
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
427
+
428
+ bs_embed, seq_len, _ = prompt_embeds.shape
429
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
430
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
431
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
432
+
433
+ # get unconditional embeddings for classifier free guidance
434
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
435
+ uncond_tokens: List[str]
436
+ if negative_prompt is None:
437
+ uncond_tokens = [""] * batch_size
438
+ elif type(prompt) is not type(negative_prompt):
439
+ raise TypeError(
440
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
441
+ f" {type(prompt)}."
442
+ )
443
+ elif isinstance(negative_prompt, str):
444
+ uncond_tokens = [negative_prompt]
445
+ elif batch_size != len(negative_prompt):
446
+ raise ValueError(
447
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
448
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
449
+ " the batch size of `prompt`."
450
+ )
451
+ else:
452
+ uncond_tokens = negative_prompt
453
+
454
+ max_length = prompt_embeds.shape[1]
455
+ uncond_input = self.tokenizer(
456
+ uncond_tokens,
457
+ padding="max_length",
458
+ max_length=max_length,
459
+ truncation=True,
460
+ return_tensors="pt",
461
+ )
462
+
463
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
464
+ attention_mask = uncond_input.attention_mask.to(device)
465
+ else:
466
+ attention_mask = None
467
+
468
+ negative_prompt_embeds = self.text_encoder(
469
+ uncond_input.input_ids.to(device),
470
+ attention_mask=attention_mask,
471
+ )
472
+ negative_prompt_embeds = negative_prompt_embeds[0]
473
+
474
+ if do_classifier_free_guidance:
475
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
476
+ seq_len = negative_prompt_embeds.shape[1]
477
+
478
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
479
+
480
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
481
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
482
+
483
+ # For classifier free guidance, we need to do two forward passes.
484
+ # Here we concatenate the unconditional and text embeddings into a single batch
485
+ # to avoid doing two forward passes
486
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
487
+
488
+ return prompt_embeds
489
+
490
+ def run_safety_checker(self, image, device, dtype):
491
+ if self.safety_checker is not None:
492
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
493
+ image, has_nsfw_concept = self.safety_checker(
494
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
495
+ )
496
+ else:
497
+ has_nsfw_concept = None
498
+ return image, has_nsfw_concept
499
+
500
+ def decode_latents(self, latents):
501
+ latents = 1 / self.vae.config.scaling_factor * latents
502
+ image = self.vae.decode(latents).sample
503
+ image = (image / 2 + 0.5).clamp(0, 1)
504
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
505
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
506
+ return image
507
+
508
+ def prepare_extra_step_kwargs(self, generator, eta):
509
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
510
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
511
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
512
+ # and should be between [0, 1]
513
+
514
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
515
+ extra_step_kwargs = {}
516
+ if accepts_eta:
517
+ extra_step_kwargs["eta"] = eta
518
+
519
+ # check if the scheduler accepts generator
520
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
521
+ if accepts_generator:
522
+ extra_step_kwargs["generator"] = generator
523
+ return extra_step_kwargs
524
+
525
+ def check_inputs(
526
+ self,
527
+ prompt,
528
+ image,
529
+ mask_image,
530
+ controlnet_conditioning_image,
531
+ height,
532
+ width,
533
+ callback_steps,
534
+ negative_prompt=None,
535
+ prompt_embeds=None,
536
+ negative_prompt_embeds=None,
537
+ strength=None,
538
+ ):
539
+ if height % 8 != 0 or width % 8 != 0:
540
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
541
+
542
+ if (callback_steps is None) or (
543
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
544
+ ):
545
+ raise ValueError(
546
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
547
+ f" {type(callback_steps)}."
548
+ )
549
+
550
+ if prompt is not None and prompt_embeds is not None:
551
+ raise ValueError(
552
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
553
+ " only forward one of the two."
554
+ )
555
+ elif prompt is None and prompt_embeds is None:
556
+ raise ValueError(
557
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
558
+ )
559
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
560
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
561
+
562
+ if negative_prompt is not None and negative_prompt_embeds is not None:
563
+ raise ValueError(
564
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
565
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
566
+ )
567
+
568
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
569
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
570
+ raise ValueError(
571
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
572
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
573
+ f" {negative_prompt_embeds.shape}."
574
+ )
575
+
576
+ controlnet_cond_image_is_pil = isinstance(controlnet_conditioning_image, PIL.Image.Image)
577
+ controlnet_cond_image_is_tensor = isinstance(controlnet_conditioning_image, torch.Tensor)
578
+ controlnet_cond_image_is_pil_list = isinstance(controlnet_conditioning_image, list) and isinstance(
579
+ controlnet_conditioning_image[0], PIL.Image.Image
580
+ )
581
+ controlnet_cond_image_is_tensor_list = isinstance(controlnet_conditioning_image, list) and isinstance(
582
+ controlnet_conditioning_image[0], torch.Tensor
583
+ )
584
+
585
+ if (
586
+ not controlnet_cond_image_is_pil
587
+ and not controlnet_cond_image_is_tensor
588
+ and not controlnet_cond_image_is_pil_list
589
+ and not controlnet_cond_image_is_tensor_list
590
+ ):
591
+ raise TypeError(
592
+ "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
593
+ )
594
+
595
+ if controlnet_cond_image_is_pil:
596
+ controlnet_cond_image_batch_size = 1
597
+ elif controlnet_cond_image_is_tensor:
598
+ controlnet_cond_image_batch_size = controlnet_conditioning_image.shape[0]
599
+ elif controlnet_cond_image_is_pil_list:
600
+ controlnet_cond_image_batch_size = len(controlnet_conditioning_image)
601
+ elif controlnet_cond_image_is_tensor_list:
602
+ controlnet_cond_image_batch_size = len(controlnet_conditioning_image)
603
+
604
+ if prompt is not None and isinstance(prompt, str):
605
+ prompt_batch_size = 1
606
+ elif prompt is not None and isinstance(prompt, list):
607
+ prompt_batch_size = len(prompt)
608
+ elif prompt_embeds is not None:
609
+ prompt_batch_size = prompt_embeds.shape[0]
610
+
611
+ if controlnet_cond_image_batch_size != 1 and controlnet_cond_image_batch_size != prompt_batch_size:
612
+ raise ValueError(
613
+ f"If the image batch size is not 1, it must match the prompt batch size. image batch size: {controlnet_cond_image_batch_size}, prompt batch size: {prompt_batch_size}"
614
+ )
615
+
616
+ if isinstance(image, torch.Tensor) and not isinstance(mask_image, torch.Tensor):
617
+ raise TypeError("if `image` is a tensor, `mask_image` must also be a tensor")
618
+
619
+ if isinstance(image, PIL.Image.Image) and not isinstance(mask_image, PIL.Image.Image):
620
+ raise TypeError("if `image` is a PIL image, `mask_image` must also be a PIL image")
621
+
622
+ if isinstance(image, torch.Tensor):
623
+ if image.ndim != 3 and image.ndim != 4:
624
+ raise ValueError("`image` must have 3 or 4 dimensions")
625
+
626
+ if mask_image.ndim != 2 and mask_image.ndim != 3 and mask_image.ndim != 4:
627
+ raise ValueError("`mask_image` must have 2, 3, or 4 dimensions")
628
+
629
+ if image.ndim == 3:
630
+ image_batch_size = 1
631
+ image_channels, image_height, image_width = image.shape
632
+ elif image.ndim == 4:
633
+ image_batch_size, image_channels, image_height, image_width = image.shape
634
+
635
+ if mask_image.ndim == 2:
636
+ mask_image_batch_size = 1
637
+ mask_image_channels = 1
638
+ mask_image_height, mask_image_width = mask_image.shape
639
+ elif mask_image.ndim == 3:
640
+ mask_image_channels = 1
641
+ mask_image_batch_size, mask_image_height, mask_image_width = mask_image.shape
642
+ elif mask_image.ndim == 4:
643
+ mask_image_batch_size, mask_image_channels, mask_image_height, mask_image_width = mask_image.shape
644
+
645
+ if image_channels != 3:
646
+ raise ValueError("`image` must have 3 channels")
647
+
648
+ if mask_image_channels != 1:
649
+ raise ValueError("`mask_image` must have 1 channel")
650
+
651
+ if image_batch_size != mask_image_batch_size:
652
+ raise ValueError("`image` and `mask_image` must have the same batch sizes")
653
+
654
+ if image_height != mask_image_height or image_width != mask_image_width:
655
+ raise ValueError("`image` and `mask_image` must have the same height and width dimensions")
656
+
657
+ if image.min() < -1 or image.max() > 1:
658
+ raise ValueError("`image` should be in range [-1, 1]")
659
+
660
+ if mask_image.min() < 0 or mask_image.max() > 1:
661
+ raise ValueError("`mask_image` should be in range [0, 1]")
662
+ else:
663
+ mask_image_channels = 1
664
+ image_channels = 3
665
+
666
+ single_image_latent_channels = self.vae.config.latent_channels
667
+
668
+ total_latent_channels = single_image_latent_channels * 2 + mask_image_channels
669
+
670
+ if total_latent_channels != self.unet.config.in_channels:
671
+ raise ValueError(
672
+ f"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received"
673
+ f" non inpainting latent channels: {single_image_latent_channels},"
674
+ f" mask channels: {mask_image_channels}, and masked image channels: {single_image_latent_channels}."
675
+ f" Please verify the config of `pipeline.unet` and the `mask_image` and `image` inputs."
676
+ )
677
+
678
+ if strength < 0 or strength > 1:
679
+ raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
680
+
681
+ def get_timesteps(self, num_inference_steps, strength, device):
682
+ # get the original timestep using init_timestep
683
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
684
+
685
+ t_start = max(num_inference_steps - init_timestep, 0)
686
+ timesteps = self.scheduler.timesteps[t_start:]
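+ # e.g. num_inference_steps=50 with strength=0.8 skips the first 10 timesteps and denoises over the remaining 40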
687
+
688
+ return timesteps, num_inference_steps - t_start
689
+
690
+ def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
691
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
692
+ raise ValueError(
693
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
694
+ )
695
+
696
+ image = image.to(device=device, dtype=dtype)
697
+
698
+ batch_size = batch_size * num_images_per_prompt
699
+ if isinstance(generator, list) and len(generator) != batch_size:
700
+ raise ValueError(
701
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
702
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
703
+ )
704
+
705
+ if isinstance(generator, list):
706
+ init_latents = [
707
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
708
+ ]
709
+ init_latents = torch.cat(init_latents, dim=0)
710
+ else:
711
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
712
+
713
+ init_latents = self.vae.config.scaling_factor * init_latents
714
+
715
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
716
+ raise ValueError(
717
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
718
+ )
719
+ else:
720
+ init_latents = torch.cat([init_latents], dim=0)
721
+
722
+ shape = init_latents.shape
723
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
724
+
725
+ # get latents
726
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
727
+ latents = init_latents
728
+
729
+ return latents
730
+
731
+ def prepare_mask_latents(self, mask_image, batch_size, height, width, dtype, device, do_classifier_free_guidance):
732
+ # resize the mask to latents shape as we concatenate the mask to the latents
733
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
734
+ # and half precision
735
+ mask_image = F.interpolate(mask_image, size=(height // self.vae_scale_factor, width // self.vae_scale_factor))
736
+ mask_image = mask_image.to(device=device, dtype=dtype)
737
+
738
+ # duplicate mask for each generation per prompt, using mps friendly method
739
+ if mask_image.shape[0] < batch_size:
740
+ if not batch_size % mask_image.shape[0] == 0:
741
+ raise ValueError(
742
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
743
+ f" a total batch size of {batch_size}, but {mask_image.shape[0]} masks were passed. Make sure the number"
744
+ " of masks that you pass is divisible by the total requested batch size."
745
+ )
746
+ mask_image = mask_image.repeat(batch_size // mask_image.shape[0], 1, 1, 1)
747
+
748
+ mask_image = torch.cat([mask_image] * 2) if do_classifier_free_guidance else mask_image
749
+
750
+ mask_image_latents = mask_image
751
+
752
+ return mask_image_latents
753
+
754
+ def prepare_masked_image_latents(
755
+ self, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
756
+ ):
757
+ masked_image = masked_image.to(device=device, dtype=dtype)
758
+
759
+ # encode the masked image into latent space so we can concatenate it to the latents
760
+ if isinstance(generator, list):
761
+ masked_image_latents = [
762
+ self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
763
+ for i in range(batch_size)
764
+ ]
765
+ masked_image_latents = torch.cat(masked_image_latents, dim=0)
766
+ else:
767
+ masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
768
+ masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
769
+
770
+ # duplicate masked_image_latents for each generation per prompt, using mps friendly method
771
+ if masked_image_latents.shape[0] < batch_size:
772
+ if not batch_size % masked_image_latents.shape[0] == 0:
773
+ raise ValueError(
774
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
775
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
776
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
777
+ )
778
+ masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
779
+
780
+ masked_image_latents = (
781
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
782
+ )
783
+
784
+ # aligning device to prevent device errors when concating it with the latent model input
785
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
786
+ return masked_image_latents
787
+
788
+ def _default_height_width(self, height, width, image):
789
+ if isinstance(image, list):
790
+ image = image[0]
791
+
792
+ if height is None:
793
+ if isinstance(image, PIL.Image.Image):
794
+ height = image.height
795
+ elif isinstance(image, torch.Tensor):
796
+ height = image.shape[3]
797
+
798
+ height = (height // 8) * 8 # round down to nearest multiple of 8
799
+
800
+ if width is None:
801
+ if isinstance(image, PIL.Image.Image):
802
+ width = image.width
803
+ elif isinstance(image, torch.Tensor):
804
+ width = image.shape[2]
805
+
806
+ width = (width // 8) * 8 # round down to nearest multiple of 8
807
+
808
+ return height, width
809
+
810
+ @torch.no_grad()
811
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
812
+ def __call__(
813
+ self,
814
+ prompt: Union[str, List[str]] = None,
815
+ image: Union[torch.Tensor, PIL.Image.Image] = None,
816
+ mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
817
+ controlnet_conditioning_image: Union[
818
+ torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]
819
+ ] = None,
820
+ strength: float = 0.8,
821
+ height: Optional[int] = None,
822
+ width: Optional[int] = None,
823
+ num_inference_steps: int = 50,
824
+ guidance_scale: float = 7.5,
825
+ negative_prompt: Optional[Union[str, List[str]]] = None,
826
+ num_images_per_prompt: Optional[int] = 1,
827
+ eta: float = 0.0,
828
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
829
+ latents: Optional[torch.FloatTensor] = None,
830
+ prompt_embeds: Optional[torch.FloatTensor] = None,
831
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
832
+ output_type: Optional[str] = "pil",
833
+ return_dict: bool = True,
834
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
835
+ callback_steps: int = 1,
836
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
837
+ controlnet_conditioning_scale: float = 1.0,
838
+ ):
839
+ r"""
840
+ Function invoked when calling the pipeline for generation.
841
+
842
+ Args:
843
+ prompt (`str` or `List[str]`, *optional*):
844
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
845
+ instead.
846
+ image (`torch.Tensor` or `PIL.Image.Image`):
847
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
848
+ be masked out with `mask_image` and repainted according to `prompt`.
849
+ mask_image (`torch.Tensor` or `PIL.Image.Image`):
850
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
851
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
852
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
853
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
854
+ controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
855
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
856
+ the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
857
+ also be accepted as an image. The control image is automatically resized to fit the output image.
858
+ strength (`float`, *optional*):
859
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
860
+ will be used as a starting point, adding more noise to it the larger the `strength`. The number of
861
+ denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
862
+ be maximum and the denoising process will run for the full number of iterations specified in
863
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
864
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
865
+ The height in pixels of the generated image.
866
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
867
+ The width in pixels of the generated image.
868
+ num_inference_steps (`int`, *optional*, defaults to 50):
869
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
870
+ expense of slower inference.
871
+ guidance_scale (`float`, *optional*, defaults to 7.5):
872
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
873
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
874
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
875
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
876
+ usually at the expense of lower image quality.
877
+ negative_prompt (`str` or `List[str]`, *optional*):
878
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead.
879
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
880
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
881
+ The number of images to generate per prompt.
882
+ eta (`float`, *optional*, defaults to 0.0):
883
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
884
+ [`schedulers.DDIMScheduler`], will be ignored for others.
885
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
886
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
887
+ to make generation deterministic.
888
+ latents (`torch.FloatTensor`, *optional*):
889
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
890
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
891
+ tensor will be generated by sampling using the supplied random `generator`.
892
+ prompt_embeds (`torch.FloatTensor`, *optional*):
893
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
894
+ provided, text embeddings will be generated from `prompt` input argument.
895
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
896
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
897
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
898
+ argument.
899
+ output_type (`str`, *optional*, defaults to `"pil"`):
900
+ The output format of the generate image. Choose between
901
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
902
+ return_dict (`bool`, *optional*, defaults to `True`):
903
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
904
+ plain tuple.
905
+ callback (`Callable`, *optional*):
906
+ A function that will be called every `callback_steps` steps during inference. The function will be
907
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
908
+ callback_steps (`int`, *optional*, defaults to 1):
909
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
910
+ called at every step.
911
+ cross_attention_kwargs (`dict`, *optional*):
912
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
913
+ `self.processor` in
914
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
915
+ controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
916
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
917
+ to the residual in the original unet.
918
+
919
+ Examples:
920
+
921
+ Returns:
922
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
923
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
924
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
925
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
926
+ (nsfw) content, according to the `safety_checker`.
927
+ """
928
+ # 0. Default height and width to unet
929
+ height, width = self._default_height_width(height, width, controlnet_conditioning_image)
930
+
931
+ # 1. Check inputs. Raise error if not correct
932
+ self.check_inputs(
933
+ prompt,
934
+ image,
935
+ mask_image,
936
+ controlnet_conditioning_image,
937
+ height,
938
+ width,
939
+ callback_steps,
940
+ negative_prompt,
941
+ prompt_embeds,
942
+ negative_prompt_embeds,
943
+ strength,
944
+ )
945
+
946
+ # 2. Define call parameters
947
+ if prompt is not None and isinstance(prompt, str):
948
+ batch_size = 1
949
+ elif prompt is not None and isinstance(prompt, list):
950
+ batch_size = len(prompt)
951
+ else:
952
+ batch_size = prompt_embeds.shape[0]
953
+
954
+ device = self._execution_device
955
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
956
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
957
+ # corresponds to doing no classifier free guidance.
958
+ do_classifier_free_guidance = guidance_scale > 1.0
959
+
960
+ # 3. Encode input prompt
961
+ prompt_embeds = self._encode_prompt(
962
+ prompt,
963
+ device,
964
+ num_images_per_prompt,
965
+ do_classifier_free_guidance,
966
+ negative_prompt,
967
+ prompt_embeds=prompt_embeds,
968
+ negative_prompt_embeds=negative_prompt_embeds,
969
+ )
970
+
971
+ # 4. Prepare mask, image, and controlnet_conditioning_image
972
+ image = prepare_image(image)
973
+
974
+ mask_image = prepare_mask_image(mask_image)
975
+
976
+ controlnet_conditioning_image = prepare_controlnet_conditioning_image(
977
+ controlnet_conditioning_image,
978
+ width,
979
+ height,
980
+ batch_size * num_images_per_prompt,
981
+ num_images_per_prompt,
982
+ device,
983
+ self.controlnet.dtype,
984
+ )
985
+
986
+ masked_image = image * (mask_image < 0.5)
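+ # zero out the regions to be inpainted (mask >= 0.5); only the preserved pixels are encoded into masked-image latents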
987
+
988
+ # 5. Prepare timesteps
989
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
990
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
991
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
992
+
993
+ # 6. Prepare latent variables
994
+ latents = self.prepare_latents(
995
+ image,
996
+ latent_timestep,
997
+ batch_size,
998
+ num_images_per_prompt,
999
+ prompt_embeds.dtype,
1000
+ device,
1001
+ generator,
1002
+ )
1003
+
1004
+ mask_image_latents = self.prepare_mask_latents(
1005
+ mask_image,
1006
+ batch_size * num_images_per_prompt,
1007
+ height,
1008
+ width,
1009
+ prompt_embeds.dtype,
1010
+ device,
1011
+ do_classifier_free_guidance,
1012
+ )
1013
+
1014
+ masked_image_latents = self.prepare_masked_image_latents(
1015
+ masked_image,
1016
+ batch_size * num_images_per_prompt,
1017
+ height,
1018
+ width,
1019
+ prompt_embeds.dtype,
1020
+ device,
1021
+ generator,
1022
+ do_classifier_free_guidance,
1023
+ )
1024
+
1025
+ if do_classifier_free_guidance:
1026
+ controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)
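+ # duplicate the control image so the unconditional and text-conditioned halves of the CFG batch share the same conditioning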
1027
+
1028
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1029
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1030
+
1031
+ # 8. Denoising loop
1032
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1033
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1034
+ for i, t in enumerate(timesteps):
1035
+ # expand the latents if we are doing classifier free guidance
1036
+ non_inpainting_latent_model_input = (
1037
+ torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1038
+ )
1039
+
1040
+ non_inpainting_latent_model_input = self.scheduler.scale_model_input(
1041
+ non_inpainting_latent_model_input, t
1042
+ )
1043
+
1044
+ inpainting_latent_model_input = torch.cat(
1045
+ [non_inpainting_latent_model_input, mask_image_latents, masked_image_latents], dim=1
1046
+ )
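+ # the inpainting unet takes latents + mask + masked-image latents along the channel dim (typically 4 + 1 + 4 = 9 channels)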
1047
+
1048
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1049
+ non_inpainting_latent_model_input,
1050
+ t,
1051
+ encoder_hidden_states=prompt_embeds,
1052
+ controlnet_cond=controlnet_conditioning_image,
1053
+ return_dict=False,
1054
+ )
1055
+
1056
+ down_block_res_samples = [
1057
+ down_block_res_sample * controlnet_conditioning_scale
1058
+ for down_block_res_sample in down_block_res_samples
1059
+ ]
1060
+ mid_block_res_sample *= controlnet_conditioning_scale
1061
+
1062
+ # predict the noise residual
1063
+ noise_pred = self.unet(
1064
+ inpainting_latent_model_input,
1065
+ t,
1066
+ encoder_hidden_states=prompt_embeds,
1067
+ cross_attention_kwargs=cross_attention_kwargs,
1068
+ down_block_additional_residuals=down_block_res_samples,
1069
+ mid_block_additional_residual=mid_block_res_sample,
1070
+ ).sample
1071
+
1072
+ # perform guidance
1073
+ if do_classifier_free_guidance:
1074
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1075
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
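+ # shift the unconditional prediction toward the text-conditioned one, scaled by guidance_scale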
1076
+
1077
+ # compute the previous noisy sample x_t -> x_t-1
1078
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1079
+
1080
+ # call the callback, if provided
1081
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1082
+ progress_bar.update()
1083
+ if callback is not None and i % callback_steps == 0:
1084
+ step_idx = i // getattr(self.scheduler, "order", 1)
1085
+ callback(step_idx, t, latents)
1086
+
1087
+ # If we do sequential model offloading, let's offload unet and controlnet
1088
+ # manually for max memory savings
1089
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1090
+ self.unet.to("cpu")
1091
+ self.controlnet.to("cpu")
1092
+ torch.cuda.empty_cache()
1093
+
1094
+ if output_type == "latent":
1095
+ image = latents
1096
+ has_nsfw_concept = None
1097
+ elif output_type == "pil":
1098
+ # 8. Post-processing
1099
+ image = self.decode_latents(latents)
1100
+
1101
+ # 9. Run safety checker
1102
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1103
+
1104
+ # 10. Convert to PIL
1105
+ image = self.numpy_to_pil(image)
1106
+ else:
1107
+ # 8. Post-processing
1108
+ image = self.decode_latents(latents)
1109
+
1110
+ # 9. Run safety checker
1111
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1112
+
1113
+ # Offload last model to CPU
1114
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1115
+ self.final_offload_hook.offload()
1116
+
1117
+ if not return_dict:
1118
+ return (image, has_nsfw_concept)
1119
+
1120
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
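A minimal usage sketch for the pipeline above (not part of the diff): it assumes the community `custom_pipeline` loading mechanism and reuses the checkpoints and image URLs from the example docstring; the stand-in conditioning image is purely illustrative, and a real run would pass a segmentation map as the docstring shows.

```py
import torch

from diffusers import ControlNetModel, DiffusionPipeline, UniPCMultistepScheduler
from diffusers.utils import load_image

# checkpoints taken from the example docstring above
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    controlnet=controlnet,
    custom_pipeline="stable_diffusion_controlnet_inpaint_img2img",
    safety_checker=None,
    torch_dtype=torch.float16,
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
    "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
)

# illustrative stand-in only: the seg ControlNet really wants a segmentation map (see image_to_seg in the docstring above)
controlnet_conditioning_image = image

result = pipe(
    "Face of a yellow cat, high resolution, sitting on a park bench",
    image,
    mask_image,
    controlnet_conditioning_image,
    num_inference_steps=20,
    strength=0.8,
).images[0]
result.save("out.png")
```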
v0.24.0/stable_diffusion_controlnet_reference.py ADDED
@@ -0,0 +1,838 @@
1
+ # Inspired by: https://github.com/Mikubill/sd-webui-controlnet/discussions/1236 and https://github.com/Mikubill/sd-webui-controlnet/discussions/1280
2
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+ import torch
7
+
8
+ from diffusers import StableDiffusionControlNetPipeline
9
+ from diffusers.models import ControlNetModel
10
+ from diffusers.models.attention import BasicTransformerBlock
11
+ from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UpBlock2D
12
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
14
+ from diffusers.utils import logging
15
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
16
+
17
+
18
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
19
+
20
+ EXAMPLE_DOC_STRING = """
21
+ Examples:
22
+ ```py
23
+ >>> import cv2
24
+ >>> import torch
25
+ >>> import numpy as np
26
+ >>> from PIL import Image
27
+ >>> from diffusers import UniPCMultistepScheduler
28
+ >>> from diffusers.utils import load_image
29
+
30
+ >>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
31
+
32
+ >>> # get canny image
33
+ >>> image = cv2.Canny(np.array(input_image), 100, 200)
34
+ >>> image = image[:, :, None]
35
+ >>> image = np.concatenate([image, image, image], axis=2)
36
+ >>> canny_image = Image.fromarray(image)
37
+
38
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
39
+ >>> pipe = StableDiffusionControlNetReferencePipeline.from_pretrained(
40
+ "runwayml/stable-diffusion-v1-5",
41
+ controlnet=controlnet,
42
+ safety_checker=None,
43
+ torch_dtype=torch.float16
44
+ ).to('cuda:0')
45
+
46
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe_controlnet.scheduler.config)
47
+
48
+ >>> result_img = pipe(ref_image=input_image,
49
+ prompt="1girl",
50
+ image=canny_image,
51
+ num_inference_steps=20,
52
+ reference_attn=True,
53
+ reference_adain=True).images[0]
54
+
55
+ >>> result_img.show()
56
+ ```
57
+ """
58
+
59
+
60
+ def torch_dfs(model: torch.nn.Module):
61
+ result = [model]
62
+ for child in model.children():
63
+ result += torch_dfs(child)
64
+ return result
65
+
66
+
67
+ class StableDiffusionControlNetReferencePipeline(StableDiffusionControlNetPipeline):
68
+ def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
69
+ refimage = refimage.to(device=device, dtype=dtype)
70
+
71
+ # encode the mask image into latents space so we can concatenate it to the latents
72
+ if isinstance(generator, list):
73
+ ref_image_latents = [
74
+ self.vae.encode(refimage[i : i + 1]).latent_dist.sample(generator=generator[i])
75
+ for i in range(batch_size)
76
+ ]
77
+ ref_image_latents = torch.cat(ref_image_latents, dim=0)
78
+ else:
79
+ ref_image_latents = self.vae.encode(refimage).latent_dist.sample(generator=generator)
80
+ ref_image_latents = self.vae.config.scaling_factor * ref_image_latents
81
+
82
+ # duplicate mask and ref_image_latents for each generation per prompt, using mps friendly method
83
+ if ref_image_latents.shape[0] < batch_size:
84
+ if not batch_size % ref_image_latents.shape[0] == 0:
85
+ raise ValueError(
86
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
87
+ f" to a total batch size of {batch_size}, but {ref_image_latents.shape[0]} images were passed."
88
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
89
+ )
90
+ ref_image_latents = ref_image_latents.repeat(batch_size // ref_image_latents.shape[0], 1, 1, 1)
91
+
92
+ ref_image_latents = torch.cat([ref_image_latents] * 2) if do_classifier_free_guidance else ref_image_latents
93
+
94
+ # aligning device to prevent device errors when concating it with the latent model input
95
+ ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)
96
+ return ref_image_latents
97
+
98
+ @torch.no_grad()
99
+ def __call__(
100
+ self,
101
+ prompt: Union[str, List[str]] = None,
102
+ image: Union[
103
+ torch.FloatTensor,
104
+ PIL.Image.Image,
105
+ np.ndarray,
106
+ List[torch.FloatTensor],
107
+ List[PIL.Image.Image],
108
+ List[np.ndarray],
109
+ ] = None,
110
+ ref_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
111
+ height: Optional[int] = None,
112
+ width: Optional[int] = None,
113
+ num_inference_steps: int = 50,
114
+ guidance_scale: float = 7.5,
115
+ negative_prompt: Optional[Union[str, List[str]]] = None,
116
+ num_images_per_prompt: Optional[int] = 1,
117
+ eta: float = 0.0,
118
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
119
+ latents: Optional[torch.FloatTensor] = None,
120
+ prompt_embeds: Optional[torch.FloatTensor] = None,
121
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
122
+ output_type: Optional[str] = "pil",
123
+ return_dict: bool = True,
124
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
125
+ callback_steps: int = 1,
126
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
127
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
128
+ guess_mode: bool = False,
129
+ attention_auto_machine_weight: float = 1.0,
130
+ gn_auto_machine_weight: float = 1.0,
131
+ style_fidelity: float = 0.5,
132
+ reference_attn: bool = True,
133
+ reference_adain: bool = True,
134
+ ):
135
+ r"""
136
+ Function invoked when calling the pipeline for generation.
137
+
138
+ Args:
139
+ prompt (`str` or `List[str]`, *optional*):
140
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
141
+ instead.
142
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
143
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
144
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
145
+ the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
146
+ also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
147
+ height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
148
+ specified in init, images must be passed as a list such that each element of the list can be correctly
149
+ batched for input to a single controlnet.
150
+ ref_image (`torch.FloatTensor`, `PIL.Image.Image`):
151
+ The Reference Control input condition. Reference Control uses this input condition to generate guidance to Unet. If
152
+ the type is specified as `Torch.FloatTensor`, it is passed to Reference Control as is. `PIL.Image.Image` can
153
+ also be accepted as an image.
154
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
155
+ The height in pixels of the generated image.
156
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
157
+ The width in pixels of the generated image.
158
+ num_inference_steps (`int`, *optional*, defaults to 50):
159
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
160
+ expense of slower inference.
161
+ guidance_scale (`float`, *optional*, defaults to 7.5):
162
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
163
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
164
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
165
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
166
+ usually at the expense of lower image quality.
167
+ negative_prompt (`str` or `List[str]`, *optional*):
168
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
169
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
170
+ less than `1`).
171
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
172
+ The number of images to generate per prompt.
173
+ eta (`float`, *optional*, defaults to 0.0):
174
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
175
+ [`schedulers.DDIMScheduler`], will be ignored for others.
176
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
177
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
178
+ to make generation deterministic.
179
+ latents (`torch.FloatTensor`, *optional*):
180
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
181
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
182
+ tensor will be generated by sampling using the supplied random `generator`.
183
+ prompt_embeds (`torch.FloatTensor`, *optional*):
184
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
185
+ provided, text embeddings will be generated from `prompt` input argument.
186
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
187
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
188
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
189
+ argument.
190
+ output_type (`str`, *optional*, defaults to `"pil"`):
191
+ The output format of the generated image. Choose between
192
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
193
+ return_dict (`bool`, *optional*, defaults to `True`):
194
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
195
+ plain tuple.
196
+ callback (`Callable`, *optional*):
197
+ A function that will be called every `callback_steps` steps during inference. The function will be
198
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
199
+ callback_steps (`int`, *optional*, defaults to 1):
200
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
201
+ called at every step.
202
+ cross_attention_kwargs (`dict`, *optional*):
203
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
204
+ `self.processor` in
205
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
206
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
207
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
208
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
209
+ corresponding scale as a list.
210
+ guess_mode (`bool`, *optional*, defaults to `False`):
211
+ In this mode, the ControlNet encoder tries its best to recognize the content of the input image even if
212
+ you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.
213
+ attention_auto_machine_weight (`float`):
214
+ Weight of using reference query for self attention's context.
215
+ If attention_auto_machine_weight=1.0, use reference query for all self attention's context.
216
+ gn_auto_machine_weight (`float`):
217
+ Weight of using reference adain. If gn_auto_machine_weight=2.0, use all reference adain plugins.
218
+ style_fidelity (`float`):
219
+ Style fidelity of ref_uncond_xt. If style_fidelity=1.0, the result follows the reference (control) more closely;
220
+ if style_fidelity=0.0, it follows the prompt more closely; values in between trade off the two.
221
+ reference_attn (`bool`):
222
+ Whether to use reference query for self attention's context.
223
+ reference_adain (`bool`):
224
+ Whether to use reference adain.
225
+
226
+ Examples:
227
+
228
+ Returns:
229
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
230
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
231
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
232
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
233
+ (nsfw) content, according to the `safety_checker`.
234
+ """
235
+ assert reference_attn or reference_adain, "`reference_attn` or `reference_adain` must be True."
236
+
237
+ # 1. Check inputs. Raise error if not correct
238
+ self.check_inputs(
239
+ prompt,
240
+ image,
241
+ callback_steps,
242
+ negative_prompt,
243
+ prompt_embeds,
244
+ negative_prompt_embeds,
245
+ controlnet_conditioning_scale,
246
+ )
247
+
248
+ # 2. Define call parameters
249
+ if prompt is not None and isinstance(prompt, str):
250
+ batch_size = 1
251
+ elif prompt is not None and isinstance(prompt, list):
252
+ batch_size = len(prompt)
253
+ else:
254
+ batch_size = prompt_embeds.shape[0]
255
+
256
+ device = self._execution_device
257
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
258
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
259
+ # corresponds to doing no classifier free guidance.
260
+ do_classifier_free_guidance = guidance_scale > 1.0
261
+
262
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
263
+
264
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
265
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
266
+
267
+ global_pool_conditions = (
268
+ controlnet.config.global_pool_conditions
269
+ if isinstance(controlnet, ControlNetModel)
270
+ else controlnet.nets[0].config.global_pool_conditions
271
+ )
272
+ guess_mode = guess_mode or global_pool_conditions
273
+
274
+ # 3. Encode input prompt
275
+ text_encoder_lora_scale = (
276
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
277
+ )
278
+ prompt_embeds = self._encode_prompt(
279
+ prompt,
280
+ device,
281
+ num_images_per_prompt,
282
+ do_classifier_free_guidance,
283
+ negative_prompt,
284
+ prompt_embeds=prompt_embeds,
285
+ negative_prompt_embeds=negative_prompt_embeds,
286
+ lora_scale=text_encoder_lora_scale,
287
+ )
288
+
289
+ # 4. Prepare image
290
+ if isinstance(controlnet, ControlNetModel):
291
+ image = self.prepare_image(
292
+ image=image,
293
+ width=width,
294
+ height=height,
295
+ batch_size=batch_size * num_images_per_prompt,
296
+ num_images_per_prompt=num_images_per_prompt,
297
+ device=device,
298
+ dtype=controlnet.dtype,
299
+ do_classifier_free_guidance=do_classifier_free_guidance,
300
+ guess_mode=guess_mode,
301
+ )
302
+ height, width = image.shape[-2:]
303
+ elif isinstance(controlnet, MultiControlNetModel):
304
+ images = []
305
+
306
+ for image_ in image:
307
+ image_ = self.prepare_image(
308
+ image=image_,
309
+ width=width,
310
+ height=height,
311
+ batch_size=batch_size * num_images_per_prompt,
312
+ num_images_per_prompt=num_images_per_prompt,
313
+ device=device,
314
+ dtype=controlnet.dtype,
315
+ do_classifier_free_guidance=do_classifier_free_guidance,
316
+ guess_mode=guess_mode,
317
+ )
318
+
319
+ images.append(image_)
320
+
321
+ image = images
322
+ height, width = image[0].shape[-2:]
323
+ else:
324
+ assert False
325
+
326
+ # 5. Preprocess reference image
327
+ ref_image = self.prepare_image(
328
+ image=ref_image,
329
+ width=width,
330
+ height=height,
331
+ batch_size=batch_size * num_images_per_prompt,
332
+ num_images_per_prompt=num_images_per_prompt,
333
+ device=device,
334
+ dtype=prompt_embeds.dtype,
335
+ )
336
+
337
+ # 6. Prepare timesteps
338
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
339
+ timesteps = self.scheduler.timesteps
340
+
341
+ # 7. Prepare latent variables
342
+ num_channels_latents = self.unet.config.in_channels
343
+ latents = self.prepare_latents(
344
+ batch_size * num_images_per_prompt,
345
+ num_channels_latents,
346
+ height,
347
+ width,
348
+ prompt_embeds.dtype,
349
+ device,
350
+ generator,
351
+ latents,
352
+ )
353
+
354
+ # 8. Prepare reference latent variables
355
+ ref_image_latents = self.prepare_ref_latents(
356
+ ref_image,
357
+ batch_size * num_images_per_prompt,
358
+ prompt_embeds.dtype,
359
+ device,
360
+ generator,
361
+ do_classifier_free_guidance,
362
+ )
363
+
364
+ # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
365
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
366
+
367
+ # 10. Modify self attention and group norm
368
+ MODE = "write"
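+ # MODE toggles the monkey-patched forwards below between caching reference features ("write") and reusing them ("read").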
369
+ uc_mask = (
370
+ torch.Tensor([1] * batch_size * num_images_per_prompt + [0] * batch_size * num_images_per_prompt)
371
+ .type_as(ref_image_latents)
372
+ .bool()
373
+ )
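+ # uc_mask marks the unconditional half of the classifier-free-guidance batch; the "read" hooks use it to recompute that half without reference features when style_fidelity > 0.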
374
+
375
+ def hacked_basic_transformer_inner_forward(
376
+ self,
377
+ hidden_states: torch.FloatTensor,
378
+ attention_mask: Optional[torch.FloatTensor] = None,
379
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
380
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
381
+ timestep: Optional[torch.LongTensor] = None,
382
+ cross_attention_kwargs: Dict[str, Any] = None,
383
+ class_labels: Optional[torch.LongTensor] = None,
384
+ ):
385
+ if self.use_ada_layer_norm:
386
+ norm_hidden_states = self.norm1(hidden_states, timestep)
387
+ elif self.use_ada_layer_norm_zero:
388
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
389
+ hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
390
+ )
391
+ else:
392
+ norm_hidden_states = self.norm1(hidden_states)
393
+
394
+ # 1. Self-Attention
395
+ cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
396
+ if self.only_cross_attention:
397
+ attn_output = self.attn1(
398
+ norm_hidden_states,
399
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
400
+ attention_mask=attention_mask,
401
+ **cross_attention_kwargs,
402
+ )
403
+ else:
404
+ if MODE == "write":
405
+ self.bank.append(norm_hidden_states.detach().clone())
406
+ attn_output = self.attn1(
407
+ norm_hidden_states,
408
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
409
+ attention_mask=attention_mask,
410
+ **cross_attention_kwargs,
411
+ )
412
+ if MODE == "read":
413
+ if attention_auto_machine_weight > self.attn_weight:
414
+ attn_output_uc = self.attn1(
415
+ norm_hidden_states,
416
+ encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),
417
+ # attention_mask=attention_mask,
418
+ **cross_attention_kwargs,
419
+ )
420
+ attn_output_c = attn_output_uc.clone()
421
+ if do_classifier_free_guidance and style_fidelity > 0:
422
+ attn_output_c[uc_mask] = self.attn1(
423
+ norm_hidden_states[uc_mask],
424
+ encoder_hidden_states=norm_hidden_states[uc_mask],
425
+ **cross_attention_kwargs,
426
+ )
427
+ attn_output = style_fidelity * attn_output_c + (1.0 - style_fidelity) * attn_output_uc
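+ # attn_output_uc attends over the cached reference features for the whole batch, attn_output_c redoes the unconditional rows without them, and style_fidelity blends the two.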
428
+ self.bank.clear()
429
+ else:
430
+ attn_output = self.attn1(
431
+ norm_hidden_states,
432
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
433
+ attention_mask=attention_mask,
434
+ **cross_attention_kwargs,
435
+ )
436
+ if self.use_ada_layer_norm_zero:
437
+ attn_output = gate_msa.unsqueeze(1) * attn_output
438
+ hidden_states = attn_output + hidden_states
439
+
440
+ if self.attn2 is not None:
441
+ norm_hidden_states = (
442
+ self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
443
+ )
444
+
445
+ # 2. Cross-Attention
446
+ attn_output = self.attn2(
447
+ norm_hidden_states,
448
+ encoder_hidden_states=encoder_hidden_states,
449
+ attention_mask=encoder_attention_mask,
450
+ **cross_attention_kwargs,
451
+ )
452
+ hidden_states = attn_output + hidden_states
453
+
454
+ # 3. Feed-forward
455
+ norm_hidden_states = self.norm3(hidden_states)
456
+
457
+ if self.use_ada_layer_norm_zero:
458
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
459
+
460
+ ff_output = self.ff(norm_hidden_states)
461
+
462
+ if self.use_ada_layer_norm_zero:
463
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
464
+
465
+ hidden_states = ff_output + hidden_states
466
+
467
+ return hidden_states
468
+
469
+ def hacked_mid_forward(self, *args, **kwargs):
470
+ eps = 1e-6
471
+ x = self.original_forward(*args, **kwargs)
472
+ if MODE == "write":
473
+ if gn_auto_machine_weight >= self.gn_weight:
474
+ var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
475
+ self.mean_bank.append(mean)
476
+ self.var_bank.append(var)
477
+ if MODE == "read":
478
+ if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
479
+ var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
480
+ std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
481
+ mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))
482
+ var_acc = sum(self.var_bank) / float(len(self.var_bank))
483
+ std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
484
+ x_uc = (((x - mean) / std) * std_acc) + mean_acc
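+ # AdaIN-style transfer: re-standardize the current features, then rescale them with the mean/std accumulated during the reference ("write") pass.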
485
+ x_c = x_uc.clone()
486
+ if do_classifier_free_guidance and style_fidelity > 0:
487
+ x_c[uc_mask] = x[uc_mask]
488
+ x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc
489
+ self.mean_bank = []
490
+ self.var_bank = []
491
+ return x
492
+
493
+ def hack_CrossAttnDownBlock2D_forward(
494
+ self,
495
+ hidden_states: torch.FloatTensor,
496
+ temb: Optional[torch.FloatTensor] = None,
497
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
498
+ attention_mask: Optional[torch.FloatTensor] = None,
499
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
500
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
501
+ ):
502
+ eps = 1e-6
503
+
504
+ # TODO(Patrick, William) - attention mask is not used
505
+ output_states = ()
506
+
507
+ for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
508
+ hidden_states = resnet(hidden_states, temb)
509
+ hidden_states = attn(
510
+ hidden_states,
511
+ encoder_hidden_states=encoder_hidden_states,
512
+ cross_attention_kwargs=cross_attention_kwargs,
513
+ attention_mask=attention_mask,
514
+ encoder_attention_mask=encoder_attention_mask,
515
+ return_dict=False,
516
+ )[0]
517
+ if MODE == "write":
518
+ if gn_auto_machine_weight >= self.gn_weight:
519
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
520
+ self.mean_bank.append([mean])
521
+ self.var_bank.append([var])
522
+ if MODE == "read":
523
+ if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
524
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
525
+ std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
526
+ mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
527
+ var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
528
+ std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
529
+ hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
530
+ hidden_states_c = hidden_states_uc.clone()
531
+ if do_classifier_free_guidance and style_fidelity > 0:
532
+ hidden_states_c[uc_mask] = hidden_states[uc_mask]
533
+ hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
534
+
535
+ output_states = output_states + (hidden_states,)
536
+
537
+ if MODE == "read":
538
+ self.mean_bank = []
539
+ self.var_bank = []
540
+
541
+ if self.downsamplers is not None:
542
+ for downsampler in self.downsamplers:
543
+ hidden_states = downsampler(hidden_states)
544
+
545
+ output_states = output_states + (hidden_states,)
546
+
547
+ return hidden_states, output_states
548
+
549
+ def hacked_DownBlock2D_forward(self, hidden_states, temb=None, *args, **kwargs):
550
+ eps = 1e-6
551
+
552
+ output_states = ()
553
+
554
+ for i, resnet in enumerate(self.resnets):
555
+ hidden_states = resnet(hidden_states, temb)
556
+
557
+ if MODE == "write":
558
+ if gn_auto_machine_weight >= self.gn_weight:
559
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
560
+ self.mean_bank.append([mean])
561
+ self.var_bank.append([var])
562
+ if MODE == "read":
563
+ if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
564
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
565
+ std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
566
+ mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
567
+ var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
568
+ std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
569
+ hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
570
+ hidden_states_c = hidden_states_uc.clone()
571
+ if do_classifier_free_guidance and style_fidelity > 0:
572
+ hidden_states_c[uc_mask] = hidden_states[uc_mask]
573
+ hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
574
+
575
+ output_states = output_states + (hidden_states,)
576
+
577
+ if MODE == "read":
578
+ self.mean_bank = []
579
+ self.var_bank = []
580
+
581
+ if self.downsamplers is not None:
582
+ for downsampler in self.downsamplers:
583
+ hidden_states = downsampler(hidden_states)
584
+
585
+ output_states = output_states + (hidden_states,)
586
+
587
+ return hidden_states, output_states
588
+
589
+ def hacked_CrossAttnUpBlock2D_forward(
590
+ self,
591
+ hidden_states: torch.FloatTensor,
592
+ res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
593
+ temb: Optional[torch.FloatTensor] = None,
594
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
595
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
596
+ upsample_size: Optional[int] = None,
597
+ attention_mask: Optional[torch.FloatTensor] = None,
598
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
599
+ ):
600
+ eps = 1e-6
601
+ # TODO(Patrick, William) - attention mask is not used
602
+ for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
603
+ # pop res hidden states
604
+ res_hidden_states = res_hidden_states_tuple[-1]
605
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
606
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
607
+ hidden_states = resnet(hidden_states, temb)
608
+ hidden_states = attn(
609
+ hidden_states,
610
+ encoder_hidden_states=encoder_hidden_states,
611
+ cross_attention_kwargs=cross_attention_kwargs,
612
+ attention_mask=attention_mask,
613
+ encoder_attention_mask=encoder_attention_mask,
614
+ return_dict=False,
615
+ )[0]
616
+
617
+ if MODE == "write":
618
+ if gn_auto_machine_weight >= self.gn_weight:
619
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
620
+ self.mean_bank.append([mean])
621
+ self.var_bank.append([var])
622
+ if MODE == "read":
623
+ if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
624
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
625
+ std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
626
+ mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
627
+ var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
628
+ std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
629
+ hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
630
+ hidden_states_c = hidden_states_uc.clone()
631
+ if do_classifier_free_guidance and style_fidelity > 0:
632
+ hidden_states_c[uc_mask] = hidden_states[uc_mask]
633
+ hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
634
+
635
+ if MODE == "read":
636
+ self.mean_bank = []
637
+ self.var_bank = []
638
+
639
+ if self.upsamplers is not None:
640
+ for upsampler in self.upsamplers:
641
+ hidden_states = upsampler(hidden_states, upsample_size)
642
+
643
+ return hidden_states
644
+
645
+ def hacked_UpBlock2D_forward(
646
+ self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, *args, **kwargs
647
+ ):
648
+ eps = 1e-6
649
+ for i, resnet in enumerate(self.resnets):
650
+ # pop res hidden states
651
+ res_hidden_states = res_hidden_states_tuple[-1]
652
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
653
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
654
+ hidden_states = resnet(hidden_states, temb)
655
+
656
+ if MODE == "write":
657
+ if gn_auto_machine_weight >= self.gn_weight:
658
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
659
+ self.mean_bank.append([mean])
660
+ self.var_bank.append([var])
661
+ if MODE == "read":
662
+ if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
663
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
664
+ std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
665
+ mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
666
+ var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
667
+ std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
668
+ hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
669
+ hidden_states_c = hidden_states_uc.clone()
670
+ if do_classifier_free_guidance and style_fidelity > 0:
671
+ hidden_states_c[uc_mask] = hidden_states[uc_mask]
672
+ hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
673
+
674
+ if MODE == "read":
675
+ self.mean_bank = []
676
+ self.var_bank = []
677
+
678
+ if self.upsamplers is not None:
679
+ for upsampler in self.upsamplers:
680
+ hidden_states = upsampler(hidden_states, upsample_size)
681
+
682
+ return hidden_states
683
+
684
+ if reference_attn:
685
+ attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock)]
686
+ attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])
687
+
688
+ for i, module in enumerate(attn_modules):
689
+ module._original_inner_forward = module.forward
690
+ module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)
691
+ module.bank = []
692
+ module.attn_weight = float(i) / float(len(attn_modules))
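+ # Blocks are ordered from largest to smallest hidden size, so attn_weight grows with the index; only blocks whose attn_weight is below attention_auto_machine_weight reuse the reference features in "read" mode.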
693
+
694
+ if reference_adain:
695
+ gn_modules = [self.unet.mid_block]
696
+ self.unet.mid_block.gn_weight = 0
697
+
698
+ down_blocks = self.unet.down_blocks
699
+ for w, module in enumerate(down_blocks):
700
+ module.gn_weight = 1.0 - float(w) / float(len(down_blocks))
701
+ gn_modules.append(module)
702
+
703
+ up_blocks = self.unet.up_blocks
704
+ for w, module in enumerate(up_blocks):
705
+ module.gn_weight = float(w) / float(len(up_blocks))
706
+ gn_modules.append(module)
707
+
708
+ for i, module in enumerate(gn_modules):
709
+ if getattr(module, "original_forward", None) is None:
710
+ module.original_forward = module.forward
711
+ if i == 0:
712
+ # mid_block
713
+ module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)
714
+ elif isinstance(module, CrossAttnDownBlock2D):
715
+ module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)
716
+ elif isinstance(module, DownBlock2D):
717
+ module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)
718
+ elif isinstance(module, CrossAttnUpBlock2D):
719
+ module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)
720
+ elif isinstance(module, UpBlock2D):
721
+ module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)
722
+ module.mean_bank = []
723
+ module.var_bank = []
724
+ module.gn_weight *= 2
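+ # gn_weight is doubled, so with the default gn_auto_machine_weight=1.0 only blocks whose doubled weight is <= 1.0 apply reference AdaIN; gn_auto_machine_weight=2.0 enables it for all of them.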
725
+
726
+ # 11. Denoising loop
727
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
728
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
729
+ for i, t in enumerate(timesteps):
730
+ # expand the latents if we are doing classifier free guidance
731
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
732
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
733
+
734
+ # controlnet(s) inference
735
+ if guess_mode and do_classifier_free_guidance:
736
+ # Infer ControlNet only for the conditional batch.
737
+ control_model_input = latents
738
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
739
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
740
+ else:
741
+ control_model_input = latent_model_input
742
+ controlnet_prompt_embeds = prompt_embeds
743
+
744
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
745
+ control_model_input,
746
+ t,
747
+ encoder_hidden_states=controlnet_prompt_embeds,
748
+ controlnet_cond=image,
749
+ conditioning_scale=controlnet_conditioning_scale,
750
+ guess_mode=guess_mode,
751
+ return_dict=False,
752
+ )
753
+
754
+ if guess_mode and do_classifier_free_guidance:
755
+ # ControlNet was inferred only for the conditional batch.
756
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
757
+ # add 0 to the unconditional batch to keep it unchanged.
758
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
759
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
760
+
761
+ # ref only part
762
+ noise = randn_tensor(
763
+ ref_image_latents.shape, generator=generator, device=device, dtype=ref_image_latents.dtype
764
+ )
765
+ ref_xt = self.scheduler.add_noise(
766
+ ref_image_latents,
767
+ noise,
768
+ t.reshape(
769
+ 1,
770
+ ),
771
+ )
772
+ ref_xt = self.scheduler.scale_model_input(ref_xt, t)
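+ # The clean reference latents are re-noised to the current timestep so the "write" pass sees them at the same noise level as the latents being denoised.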
773
+
774
+ MODE = "write"
775
+ self.unet(
776
+ ref_xt,
777
+ t,
778
+ encoder_hidden_states=prompt_embeds,
779
+ cross_attention_kwargs=cross_attention_kwargs,
780
+ return_dict=False,
781
+ )
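+ # This forward pass only fills the feature banks of the patched modules; its noise prediction is discarded.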
782
+
783
+ # predict the noise residual
784
+ MODE = "read"
785
+ noise_pred = self.unet(
786
+ latent_model_input,
787
+ t,
788
+ encoder_hidden_states=prompt_embeds,
789
+ cross_attention_kwargs=cross_attention_kwargs,
790
+ down_block_additional_residuals=down_block_res_samples,
791
+ mid_block_additional_residual=mid_block_res_sample,
792
+ return_dict=False,
793
+ )[0]
794
+
795
+ # perform guidance
796
+ if do_classifier_free_guidance:
797
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
798
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
799
+
800
+ # compute the previous noisy sample x_t -> x_t-1
801
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
802
+
803
+ # call the callback, if provided
804
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
805
+ progress_bar.update()
806
+ if callback is not None and i % callback_steps == 0:
807
+ step_idx = i // getattr(self.scheduler, "order", 1)
808
+ callback(step_idx, t, latents)
809
+
810
+ # If we do sequential model offloading, let's offload unet and controlnet
811
+ # manually for max memory savings
812
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
813
+ self.unet.to("cpu")
814
+ self.controlnet.to("cpu")
815
+ torch.cuda.empty_cache()
816
+
817
+ if not output_type == "latent":
818
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
819
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
820
+ else:
821
+ image = latents
822
+ has_nsfw_concept = None
823
+
824
+ if has_nsfw_concept is None:
825
+ do_denormalize = [True] * image.shape[0]
826
+ else:
827
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
828
+
829
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
830
+
831
+ # Offload last model to CPU
832
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
833
+ self.final_offload_hook.offload()
834
+
835
+ if not return_dict:
836
+ return (image, has_nsfw_concept)
837
+
838
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
v0.24.0/stable_diffusion_ipex.py ADDED
@@ -0,0 +1,852 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import intel_extension_for_pytorch as ipex
19
+ import torch
20
+ from packaging import version
21
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
22
+
23
+ from diffusers.configuration_utils import FrozenDict
24
+ from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
25
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
26
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
27
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
28
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
29
+ from diffusers.schedulers import KarrasDiffusionSchedulers
30
+ from diffusers.utils import (
31
+ deprecate,
32
+ is_accelerate_available,
33
+ is_accelerate_version,
34
+ logging,
35
+ replace_example_docstring,
36
+ )
37
+ from diffusers.utils.torch_utils import randn_tensor
38
+
39
+
40
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
41
+
42
+ EXAMPLE_DOC_STRING = """
43
+ Examples:
44
+ ```py
45
+ >>> import torch
46
+ >>> from diffusers import DiffusionPipeline
47
+
48
+ >>> pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_ipex")
49
+ >>> prompt = "a photo of an astronaut riding a horse on mars"
50
+ >>> num_inference_steps = 20 # any positive step count; it must be defined before calling the pipeline
51
+ >>> # For Float32
52
+ >>> pipe.prepare_for_ipex(prompt, dtype=torch.float32, height=512, width=512) #value of image height/width should be consistent with the pipeline inference
53
+ >>> # For BFloat16
54
+ >>> pipe.prepare_for_ipex(prompt, dtype=torch.bfloat16, height=512, width=512) #value of image height/width should be consistent with the pipeline inference
55
+
56
+ >>> # For Float32
57
+ >>> image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512).images[0] #value of image height/width should be consistent with 'prepare_for_ipex()'
58
+ >>> # For BFloat16
59
+ >>> with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
60
+ >>> image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512).images[0] #value of image height/width should be consistent with 'prepare_for_ipex()'
61
+ ```
62
+ """
63
+
64
+
65
+ class StableDiffusionIPEXPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
66
+ r"""
67
+ Pipeline for text-to-image generation using Stable Diffusion on IPEX.
68
+
69
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
70
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
71
+
72
+ Args:
73
+ vae ([`AutoencoderKL`]):
74
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
75
+ text_encoder ([`CLIPTextModel`]):
76
+ Frozen text-encoder. Stable Diffusion uses the text portion of
77
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
78
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
79
+ tokenizer (`CLIPTokenizer`):
80
+ Tokenizer of class
81
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
82
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
83
+ scheduler ([`SchedulerMixin`]):
84
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
85
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
86
+ safety_checker ([`StableDiffusionSafetyChecker`]):
87
+ Classification module that estimates whether generated images could be considered offensive or harmful.
88
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
89
+ feature_extractor ([`CLIPFeatureExtractor`]):
90
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
91
+ """
92
+
93
+ _optional_components = ["safety_checker", "feature_extractor"]
94
+
95
+ def __init__(
96
+ self,
97
+ vae: AutoencoderKL,
98
+ text_encoder: CLIPTextModel,
99
+ tokenizer: CLIPTokenizer,
100
+ unet: UNet2DConditionModel,
101
+ scheduler: KarrasDiffusionSchedulers,
102
+ safety_checker: StableDiffusionSafetyChecker,
103
+ feature_extractor: CLIPFeatureExtractor,
104
+ requires_safety_checker: bool = True,
105
+ ):
106
+ super().__init__()
107
+
108
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
109
+ deprecation_message = (
110
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
111
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
112
+ "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
113
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
114
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
115
+ " file"
116
+ )
117
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
118
+ new_config = dict(scheduler.config)
119
+ new_config["steps_offset"] = 1
120
+ scheduler._internal_dict = FrozenDict(new_config)
121
+
122
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
123
+ deprecation_message = (
124
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
125
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
126
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
127
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
128
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
129
+ )
130
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
131
+ new_config = dict(scheduler.config)
132
+ new_config["clip_sample"] = False
133
+ scheduler._internal_dict = FrozenDict(new_config)
134
+
135
+ if safety_checker is None and requires_safety_checker:
136
+ logger.warning(
137
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
138
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
139
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
140
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
141
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
142
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
143
+ )
144
+
145
+ if safety_checker is not None and feature_extractor is None:
146
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
147
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
148
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
149
+ )
150
+
151
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
152
+ version.parse(unet.config._diffusers_version).base_version
153
+ ) < version.parse("0.9.0.dev0")
154
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
155
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
156
+ deprecation_message = (
157
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
158
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
159
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
160
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
161
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
162
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
163
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
164
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
165
+ " the `unet/config.json` file"
166
+ )
167
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
168
+ new_config = dict(unet.config)
169
+ new_config["sample_size"] = 64
170
+ unet._internal_dict = FrozenDict(new_config)
171
+
172
+ self.register_modules(
173
+ vae=vae,
174
+ text_encoder=text_encoder,
175
+ tokenizer=tokenizer,
176
+ unet=unet,
177
+ scheduler=scheduler,
178
+ safety_checker=safety_checker,
179
+ feature_extractor=feature_extractor,
180
+ )
181
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
182
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
183
+
184
+ def get_input_example(self, prompt, height=None, width=None, guidance_scale=7.5, num_images_per_prompt=1):
185
+ prompt_embeds = None
186
+ negative_prompt_embeds = None
187
+ negative_prompt = None
188
+ callback_steps = 1
189
+ generator = None
190
+ latents = None
191
+
192
+ # 0. Default height and width to unet
193
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
194
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
195
+
196
+ # 1. Check inputs. Raise error if not correct
197
+ self.check_inputs(
198
+ prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
199
+ )
200
+
201
+ # 2. Define call parameters
202
+ if prompt is not None and isinstance(prompt, str):
203
+ batch_size = 1
204
+ elif prompt is not None and isinstance(prompt, list):
205
+ batch_size = len(prompt)
206
+
207
+ device = "cpu"
208
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
209
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
210
+ # corresponds to doing no classifier free guidance.
211
+ do_classifier_free_guidance = guidance_scale > 1.0
212
+
213
+ # 3. Encode input prompt
214
+ prompt_embeds = self._encode_prompt(
215
+ prompt,
216
+ device,
217
+ num_images_per_prompt,
218
+ do_classifier_free_guidance,
219
+ negative_prompt,
220
+ prompt_embeds=prompt_embeds,
221
+ negative_prompt_embeds=negative_prompt_embeds,
222
+ )
223
+
224
+ # 5. Prepare latent variables
225
+ latents = self.prepare_latents(
226
+ batch_size * num_images_per_prompt,
227
+ self.unet.in_channels,
228
+ height,
229
+ width,
230
+ prompt_embeds.dtype,
231
+ device,
232
+ generator,
233
+ latents,
234
+ )
235
+ dummy = torch.ones(1, dtype=torch.int32)
236
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
237
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, dummy)
238
+
239
+ unet_input_example = (latent_model_input, dummy, prompt_embeds)
240
+ vae_decoder_input_example = latents
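+ # These tensors are only example inputs used for JIT tracing in prepare_for_ipex(); no image is generated here.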
241
+
242
+ return unet_input_example, vae_decoder_input_example
243
+
244
+ def prepare_for_ipex(self, promt, dtype=torch.float32, height=None, width=None, guidance_scale=7.5):
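+ # The main submodules are converted to the channels_last memory format below, which IPEX-optimized CPU kernels generally prefer.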
245
+ self.unet = self.unet.to(memory_format=torch.channels_last)
246
+ self.vae.decoder = self.vae.decoder.to(memory_format=torch.channels_last)
247
+ self.text_encoder = self.text_encoder.to(memory_format=torch.channels_last)
248
+ if self.safety_checker is not None:
249
+ self.safety_checker = self.safety_checker.to(memory_format=torch.channels_last)
250
+
251
+ unet_input_example, vae_decoder_input_example = self.get_input_example(promt, height, width, guidance_scale)
252
+
253
+ # optimize with ipex
254
+ if dtype == torch.bfloat16:
255
+ self.unet = ipex.optimize(self.unet.eval(), dtype=torch.bfloat16, inplace=True)
256
+ self.vae.decoder = ipex.optimize(self.vae.decoder.eval(), dtype=torch.bfloat16, inplace=True)
257
+ self.text_encoder = ipex.optimize(self.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
258
+ if self.safety_checker is not None:
259
+ self.safety_checker = ipex.optimize(self.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
260
+ elif dtype == torch.float32:
261
+ self.unet = ipex.optimize(
262
+ self.unet.eval(),
263
+ dtype=torch.float32,
264
+ inplace=True,
265
+ weights_prepack=True,
266
+ auto_kernel_selection=False,
267
+ )
268
+ self.vae.decoder = ipex.optimize(
269
+ self.vae.decoder.eval(),
270
+ dtype=torch.float32,
271
+ inplace=True,
272
+ weights_prepack=True,
273
+ auto_kernel_selection=False,
274
+ )
275
+ self.text_encoder = ipex.optimize(
276
+ self.text_encoder.eval(),
277
+ dtype=torch.float32,
278
+ inplace=True,
279
+ weights_prepack=True,
280
+ auto_kernel_selection=False,
281
+ )
282
+ if self.safety_checker is not None:
283
+ self.safety_checker = ipex.optimize(
284
+ self.safety_checker.eval(),
285
+ dtype=torch.float32,
286
+ inplace=True,
287
+ weights_prepack=True,
288
+ auto_kernel_selection=False,
289
+ )
290
+ else:
291
+ raise ValueError(" The value of 'dtype' should be 'torch.bfloat16' or 'torch.float32' !")
292
+
293
+ # trace unet model to get better performance on IPEX
294
+ with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
295
+ unet_trace_model = torch.jit.trace(self.unet, unet_input_example, check_trace=False, strict=False)
296
+ unet_trace_model = torch.jit.freeze(unet_trace_model)
297
+ self.unet.forward = unet_trace_model.forward
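+ # The traced and frozen graph replaces the eager forward, so subsequent pipeline calls run the optimized kernels.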
298
+
299
+ # trace vae.decoder model to get better performance on IPEX
300
+ with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
301
+ ave_decoder_trace_model = torch.jit.trace(
302
+ self.vae.decoder, vae_decoder_input_example, check_trace=False, strict=False
303
+ )
304
+ ave_decoder_trace_model = torch.jit.freeze(ave_decoder_trace_model)
305
+ self.vae.decoder.forward = ave_decoder_trace_model.forward
306
+
307
+ def enable_vae_slicing(self):
308
+ r"""
309
+ Enable sliced VAE decoding.
310
+
311
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
312
+ steps. This is useful to save some memory and allow larger batch sizes.
313
+ """
314
+ self.vae.enable_slicing()
315
+
316
+ def disable_vae_slicing(self):
317
+ r"""
318
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
319
+ computing decoding in one step.
320
+ """
321
+ self.vae.disable_slicing()
322
+
323
+ def enable_vae_tiling(self):
324
+ r"""
325
+ Enable tiled VAE decoding.
326
+
327
+ When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
328
+ several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
329
+ """
330
+ self.vae.enable_tiling()
331
+
332
+ def disable_vae_tiling(self):
333
+ r"""
334
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
335
+ computing decoding in one step.
336
+ """
337
+ self.vae.disable_tiling()
338
+
339
+ def enable_sequential_cpu_offload(self, gpu_id=0):
340
+ r"""
341
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
342
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
343
+ `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.
344
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
345
+ `enable_model_cpu_offload`, but performance is lower.
346
+ """
347
+ if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
348
+ from accelerate import cpu_offload
349
+ else:
350
+ raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
351
+
352
+ device = torch.device(f"cuda:{gpu_id}")
353
+
354
+ if self.device.type != "cpu":
355
+ self.to("cpu", silence_dtype_warnings=True)
356
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
357
+
358
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
359
+ cpu_offload(cpu_offloaded_model, device)
360
+
361
+ if self.safety_checker is not None:
362
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
363
+
364
+ def enable_model_cpu_offload(self, gpu_id=0):
365
+ r"""
366
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
367
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
368
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
369
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
370
+ """
371
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
372
+ from accelerate import cpu_offload_with_hook
373
+ else:
374
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
375
+
376
+ device = torch.device(f"cuda:{gpu_id}")
377
+
378
+ if self.device.type != "cpu":
379
+ self.to("cpu", silence_dtype_warnings=True)
380
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
381
+
382
+ hook = None
383
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
384
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
385
+
386
+ if self.safety_checker is not None:
387
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
388
+
389
+ # We'll offload the last model manually.
390
+ self.final_offload_hook = hook
391
+
392
+ @property
393
+ def _execution_device(self):
394
+ r"""
395
+ Returns the device on which the pipeline's models will be executed. After calling
396
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
397
+ hooks.
398
+ """
399
+ if not hasattr(self.unet, "_hf_hook"):
400
+ return self.device
401
+ for module in self.unet.modules():
402
+ if (
403
+ hasattr(module, "_hf_hook")
404
+ and hasattr(module._hf_hook, "execution_device")
405
+ and module._hf_hook.execution_device is not None
406
+ ):
407
+ return torch.device(module._hf_hook.execution_device)
408
+ return self.device
409
+
410
+ def _encode_prompt(
411
+ self,
412
+ prompt,
413
+ device,
414
+ num_images_per_prompt,
415
+ do_classifier_free_guidance,
416
+ negative_prompt=None,
417
+ prompt_embeds: Optional[torch.FloatTensor] = None,
418
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
419
+ ):
420
+ r"""
421
+ Encodes the prompt into text encoder hidden states.
422
+
423
+ Args:
424
+ prompt (`str` or `List[str]`, *optional*):
425
+ prompt to be encoded
426
+ device: (`torch.device`):
427
+ torch device
428
+ num_images_per_prompt (`int`):
429
+ number of images that should be generated per prompt
430
+ do_classifier_free_guidance (`bool`):
431
+ whether to use classifier free guidance or not
432
+ negative_prompt (`str` or `List[str]`, *optional*):
433
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
434
+ `negative_prompt_embeds` instead.
435
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
436
+ prompt_embeds (`torch.FloatTensor`, *optional*):
437
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
438
+ provided, text embeddings will be generated from `prompt` input argument.
439
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
440
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
441
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
442
+ argument.
443
+ """
444
+ if prompt is not None and isinstance(prompt, str):
445
+ batch_size = 1
446
+ elif prompt is not None and isinstance(prompt, list):
447
+ batch_size = len(prompt)
448
+ else:
449
+ batch_size = prompt_embeds.shape[0]
450
+
451
+ if prompt_embeds is None:
452
+ # textual inversion: process multi-vector tokens if necessary
453
+ if isinstance(self, TextualInversionLoaderMixin):
454
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
455
+
456
+ text_inputs = self.tokenizer(
457
+ prompt,
458
+ padding="max_length",
459
+ max_length=self.tokenizer.model_max_length,
460
+ truncation=True,
461
+ return_tensors="pt",
462
+ )
463
+ text_input_ids = text_inputs.input_ids
464
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
465
+
466
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
467
+ text_input_ids, untruncated_ids
468
+ ):
469
+ removed_text = self.tokenizer.batch_decode(
470
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
471
+ )
472
+ logger.warning(
473
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
474
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
475
+ )
476
+
477
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
478
+ attention_mask = text_inputs.attention_mask.to(device)
479
+ else:
480
+ attention_mask = None
481
+
482
+ prompt_embeds = self.text_encoder(
483
+ text_input_ids.to(device),
484
+ attention_mask=attention_mask,
485
+ )
486
+ prompt_embeds = prompt_embeds[0]
487
+
488
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
489
+
490
+ bs_embed, seq_len, _ = prompt_embeds.shape
491
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
492
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
493
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
494
+
495
+ # get unconditional embeddings for classifier free guidance
496
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
497
+ uncond_tokens: List[str]
498
+ if negative_prompt is None:
499
+ uncond_tokens = [""] * batch_size
500
+ elif type(prompt) is not type(negative_prompt):
501
+ raise TypeError(
502
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
503
+ f" {type(prompt)}."
504
+ )
505
+ elif isinstance(negative_prompt, str):
506
+ uncond_tokens = [negative_prompt]
507
+ elif batch_size != len(negative_prompt):
508
+ raise ValueError(
509
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
510
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
511
+ " the batch size of `prompt`."
512
+ )
513
+ else:
514
+ uncond_tokens = negative_prompt
515
+
516
+ # textual inversion: process multi-vector tokens if necessary
517
+ if isinstance(self, TextualInversionLoaderMixin):
518
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
519
+
520
+ max_length = prompt_embeds.shape[1]
521
+ uncond_input = self.tokenizer(
522
+ uncond_tokens,
523
+ padding="max_length",
524
+ max_length=max_length,
525
+ truncation=True,
526
+ return_tensors="pt",
527
+ )
528
+
529
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
530
+ attention_mask = uncond_input.attention_mask.to(device)
531
+ else:
532
+ attention_mask = None
533
+
534
+ negative_prompt_embeds = self.text_encoder(
535
+ uncond_input.input_ids.to(device),
536
+ attention_mask=attention_mask,
537
+ )
538
+ negative_prompt_embeds = negative_prompt_embeds[0]
539
+
540
+ if do_classifier_free_guidance:
541
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
542
+ seq_len = negative_prompt_embeds.shape[1]
543
+
544
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
545
+
546
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
547
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
548
+
549
+ # For classifier free guidance, we need to do two forward passes.
550
+ # Here we concatenate the unconditional and text embeddings into a single batch
551
+ # to avoid doing two forward passes
552
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
553
+
554
+ return prompt_embeds
555
+
556
+ def run_safety_checker(self, image, device, dtype):
557
+ if self.safety_checker is not None:
558
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
559
+ image, has_nsfw_concept = self.safety_checker(
560
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
561
+ )
562
+ else:
563
+ has_nsfw_concept = None
564
+ return image, has_nsfw_concept
565
+
566
+ def decode_latents(self, latents):
567
+ latents = 1 / self.vae.config.scaling_factor * latents
568
+ image = self.vae.decode(latents).sample
569
+ image = (image / 2 + 0.5).clamp(0, 1)
570
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
571
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
572
+ return image
573
+
574
+ def prepare_extra_step_kwargs(self, generator, eta):
575
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
576
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
577
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
578
+ # and should be between [0, 1]
579
+
580
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
581
+ extra_step_kwargs = {}
582
+ if accepts_eta:
583
+ extra_step_kwargs["eta"] = eta
584
+
585
+ # check if the scheduler accepts generator
586
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
587
+ if accepts_generator:
588
+ extra_step_kwargs["generator"] = generator
589
+ return extra_step_kwargs
590
+
591
+ def check_inputs(
592
+ self,
593
+ prompt,
594
+ height,
595
+ width,
596
+ callback_steps,
597
+ negative_prompt=None,
598
+ prompt_embeds=None,
599
+ negative_prompt_embeds=None,
600
+ ):
601
+ if height % 8 != 0 or width % 8 != 0:
602
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
603
+
604
+ if (callback_steps is None) or (
605
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
606
+ ):
607
+ raise ValueError(
608
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
609
+ f" {type(callback_steps)}."
610
+ )
611
+
612
+ if prompt is not None and prompt_embeds is not None:
613
+ raise ValueError(
614
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
615
+ " only forward one of the two."
616
+ )
617
+ elif prompt is None and prompt_embeds is None:
618
+ raise ValueError(
619
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
620
+ )
621
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
622
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
623
+
624
+ if negative_prompt is not None and negative_prompt_embeds is not None:
625
+ raise ValueError(
626
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
627
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
628
+ )
629
+
630
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
631
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
632
+ raise ValueError(
633
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
634
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
635
+ f" {negative_prompt_embeds.shape}."
636
+ )
637
+
638
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
639
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
640
+ if isinstance(generator, list) and len(generator) != batch_size:
641
+ raise ValueError(
642
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
643
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
644
+ )
645
+
646
+ if latents is None:
647
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
648
+ else:
649
+ latents = latents.to(device)
650
+
651
+ # scale the initial noise by the standard deviation required by the scheduler
652
+ latents = latents * self.scheduler.init_noise_sigma
653
+ return latents
654
+
655
+ @torch.no_grad()
656
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
657
+ def __call__(
658
+ self,
659
+ prompt: Union[str, List[str]] = None,
660
+ height: Optional[int] = None,
661
+ width: Optional[int] = None,
662
+ num_inference_steps: int = 50,
663
+ guidance_scale: float = 7.5,
664
+ negative_prompt: Optional[Union[str, List[str]]] = None,
665
+ num_images_per_prompt: Optional[int] = 1,
666
+ eta: float = 0.0,
667
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
668
+ latents: Optional[torch.FloatTensor] = None,
669
+ prompt_embeds: Optional[torch.FloatTensor] = None,
670
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
671
+ output_type: Optional[str] = "pil",
672
+ return_dict: bool = True,
673
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
674
+ callback_steps: int = 1,
675
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
676
+ ):
677
+ r"""
678
+ Function invoked when calling the pipeline for generation.
679
+
680
+ Args:
681
+ prompt (`str` or `List[str]`, *optional*):
682
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
683
+ instead.
684
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
685
+ The height in pixels of the generated image.
686
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
687
+ The width in pixels of the generated image.
688
+ num_inference_steps (`int`, *optional*, defaults to 50):
689
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
690
+ expense of slower inference.
691
+ guidance_scale (`float`, *optional*, defaults to 7.5):
692
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
693
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
694
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
695
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
696
+ usually at the expense of lower image quality.
697
+ negative_prompt (`str` or `List[str]`, *optional*):
698
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
699
+ `negative_prompt_embeds` instead.
700
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
701
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
702
+ The number of images to generate per prompt.
703
+ eta (`float`, *optional*, defaults to 0.0):
704
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
705
+ [`schedulers.DDIMScheduler`], will be ignored for others.
706
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
707
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
708
+ to make generation deterministic.
709
+ latents (`torch.FloatTensor`, *optional*):
710
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
711
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
712
+ tensor will be generated by sampling using the supplied random `generator`.
713
+ prompt_embeds (`torch.FloatTensor`, *optional*):
714
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
715
+ provided, text embeddings will be generated from `prompt` input argument.
716
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
717
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
718
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
719
+ argument.
720
+ output_type (`str`, *optional*, defaults to `"pil"`):
721
+ The output format of the generated image. Choose between
722
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
723
+ return_dict (`bool`, *optional*, defaults to `True`):
724
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
725
+ plain tuple.
726
+ callback (`Callable`, *optional*):
727
+ A function that will be called every `callback_steps` steps during inference. The function will be
728
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
729
+ callback_steps (`int`, *optional*, defaults to 1):
730
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
731
+ called at every step.
732
+ cross_attention_kwargs (`dict`, *optional*):
733
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
734
+ `self.processor` in
735
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
736
+
737
+ Examples:
738
+
739
+ Returns:
740
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
741
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
742
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
743
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
744
+ (nsfw) content, according to the `safety_checker`.
745
+ """
746
+ # 0. Default height and width to unet
747
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
748
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
749
+
750
+ # 1. Check inputs. Raise error if not correct
751
+ self.check_inputs(
752
+ prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
753
+ )
754
+
755
+ # 2. Define call parameters
756
+ if prompt is not None and isinstance(prompt, str):
757
+ batch_size = 1
758
+ elif prompt is not None and isinstance(prompt, list):
759
+ batch_size = len(prompt)
760
+ else:
761
+ batch_size = prompt_embeds.shape[0]
762
+
763
+ device = self._execution_device
764
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
765
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
766
+ # corresponds to doing no classifier free guidance.
767
+ do_classifier_free_guidance = guidance_scale > 1.0
768
+
769
+ # 3. Encode input prompt
770
+ prompt_embeds = self._encode_prompt(
771
+ prompt,
772
+ device,
773
+ num_images_per_prompt,
774
+ do_classifier_free_guidance,
775
+ negative_prompt,
776
+ prompt_embeds=prompt_embeds,
777
+ negative_prompt_embeds=negative_prompt_embeds,
778
+ )
779
+
780
+ # 4. Prepare timesteps
781
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
782
+ timesteps = self.scheduler.timesteps
783
+
784
+ # 5. Prepare latent variables
785
+ num_channels_latents = self.unet.in_channels
786
+ latents = self.prepare_latents(
787
+ batch_size * num_images_per_prompt,
788
+ num_channels_latents,
789
+ height,
790
+ width,
791
+ prompt_embeds.dtype,
792
+ device,
793
+ generator,
794
+ latents,
795
+ )
796
+
797
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
798
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
799
+
800
+ # 7. Denoising loop
801
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
802
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
803
+ for i, t in enumerate(timesteps):
804
+ # expand the latents if we are doing classifier free guidance
805
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
806
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
807
+
808
+ # predict the noise residual
809
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds)["sample"]
810
+
811
+ # perform guidance
812
+ if do_classifier_free_guidance:
813
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
814
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
815
+
816
+ # compute the previous noisy sample x_t -> x_t-1
817
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
818
+
819
+ # call the callback, if provided
820
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
821
+ progress_bar.update()
822
+ if callback is not None and i % callback_steps == 0:
823
+ step_idx = i // getattr(self.scheduler, "order", 1)
824
+ callback(step_idx, t, latents)
825
+
826
+ if output_type == "latent":
827
+ image = latents
828
+ has_nsfw_concept = None
829
+ elif output_type == "pil":
830
+ # 8. Post-processing
831
+ image = self.decode_latents(latents)
832
+
833
+ # 9. Run safety checker
834
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
835
+
836
+ # 10. Convert to PIL
837
+ image = self.numpy_to_pil(image)
838
+ else:
839
+ # 8. Post-processing
840
+ image = self.decode_latents(latents)
841
+
842
+ # 9. Run safety checker
843
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
844
+
845
+ # Offload last model to CPU
846
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
847
+ self.final_offload_hook.offload()
848
+
849
+ if not return_dict:
850
+ return (image, has_nsfw_concept)
851
+
852
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
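The `__call__` above follows the standard Stable Diffusion denoising loop: encode the prompt, prepare latents, then repeatedly predict noise and combine the unconditional and text-conditioned predictions with classifier-free guidance. A minimal, pipeline-independent sketch of that single guidance step (the tensor shapes and the `guidance_scale` value here are illustrative assumptions, matching the pipeline's default of 7.5):

```py
import torch

# stand-ins for the two halves of the batched UNet output
noise_pred_uncond = torch.randn(1, 4, 64, 64)  # unconditional branch
noise_pred_text = torch.randn(1, 4, 64, 64)    # text-conditioned branch
guidance_scale = 7.5  # same default as the pipeline above

# classifier-free guidance: push the prediction away from the
# unconditional estimate and towards the text-conditioned one
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
```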
v0.24.0/stable_diffusion_mega.py ADDED
@@ -0,0 +1,228 @@
1
+ from typing import Any, Callable, Dict, List, Optional, Union
2
+
3
+ import PIL.Image
4
+ import torch
5
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
6
+
7
+ from diffusers import (
8
+ AutoencoderKL,
9
+ DDIMScheduler,
10
+ DiffusionPipeline,
11
+ LMSDiscreteScheduler,
12
+ PNDMScheduler,
13
+ StableDiffusionImg2ImgPipeline,
14
+ StableDiffusionInpaintPipelineLegacy,
15
+ StableDiffusionPipeline,
16
+ UNet2DConditionModel,
17
+ )
18
+ from diffusers.configuration_utils import FrozenDict
19
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
20
+ from diffusers.utils import deprecate, logging
21
+
22
+
23
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
24
+
25
+
26
+ class StableDiffusionMegaPipeline(DiffusionPipeline):
27
+ r"""
28
+ Pipeline for text-to-image generation using Stable Diffusion.
29
+
30
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
31
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
32
+
33
+ Args:
34
+ vae ([`AutoencoderKL`]):
35
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
36
+ text_encoder ([`CLIPTextModel`]):
37
+ Frozen text-encoder. Stable Diffusion uses the text portion of
38
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
39
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
40
+ tokenizer (`CLIPTokenizer`):
41
+ Tokenizer of class
42
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
43
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
44
+ scheduler ([`SchedulerMixin`]):
45
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
46
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
47
+ safety_checker ([`StableDiffusionMegaSafetyChecker`]):
48
+ Classification module that estimates whether generated images could be considered offensive or harmful.
49
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
50
+ feature_extractor ([`CLIPImageProcessor`]):
51
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
52
+ """
53
+
54
+ _optional_components = ["safety_checker", "feature_extractor"]
55
+
56
+ def __init__(
57
+ self,
58
+ vae: AutoencoderKL,
59
+ text_encoder: CLIPTextModel,
60
+ tokenizer: CLIPTokenizer,
61
+ unet: UNet2DConditionModel,
62
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
63
+ safety_checker: StableDiffusionSafetyChecker,
64
+ feature_extractor: CLIPImageProcessor,
65
+ requires_safety_checker: bool = True,
66
+ ):
67
+ super().__init__()
68
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
69
+ deprecation_message = (
70
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
71
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
72
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
73
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
74
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
75
+ " file"
76
+ )
77
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
78
+ new_config = dict(scheduler.config)
79
+ new_config["steps_offset"] = 1
80
+ scheduler._internal_dict = FrozenDict(new_config)
81
+
82
+ self.register_modules(
83
+ vae=vae,
84
+ text_encoder=text_encoder,
85
+ tokenizer=tokenizer,
86
+ unet=unet,
87
+ scheduler=scheduler,
88
+ safety_checker=safety_checker,
89
+ feature_extractor=feature_extractor,
90
+ )
91
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
92
+
93
+ @property
94
+ def components(self) -> Dict[str, Any]:
95
+ return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
96
+
97
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
98
+ r"""
99
+ Enable sliced attention computation.
100
+
101
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
102
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
103
+
104
+ Args:
105
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
106
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
107
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
108
+ `attention_head_dim` must be a multiple of `slice_size`.
109
+ """
110
+ if slice_size == "auto":
111
+ # half the attention head size is usually a good trade-off between
112
+ # speed and memory
113
+ slice_size = self.unet.config.attention_head_dim // 2
114
+ self.unet.set_attention_slice(slice_size)
115
+
116
+ def disable_attention_slicing(self):
117
+ r"""
118
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
119
+ back to computing attention in one step.
120
+ """
121
+ # set slice_size = `None` to disable `attention slicing`
122
+ self.enable_attention_slicing(None)
123
+
124
+ @torch.no_grad()
125
+ def inpaint(
126
+ self,
127
+ prompt: Union[str, List[str]],
128
+ image: Union[torch.FloatTensor, PIL.Image.Image],
129
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image],
130
+ strength: float = 0.8,
131
+ num_inference_steps: Optional[int] = 50,
132
+ guidance_scale: Optional[float] = 7.5,
133
+ negative_prompt: Optional[Union[str, List[str]]] = None,
134
+ num_images_per_prompt: Optional[int] = 1,
135
+ eta: Optional[float] = 0.0,
136
+ generator: Optional[torch.Generator] = None,
137
+ output_type: Optional[str] = "pil",
138
+ return_dict: bool = True,
139
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
140
+ callback_steps: int = 1,
141
+ ):
142
+ # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline
143
+ return StableDiffusionInpaintPipelineLegacy(**self.components)(
144
+ prompt=prompt,
145
+ image=image,
146
+ mask_image=mask_image,
147
+ strength=strength,
148
+ num_inference_steps=num_inference_steps,
149
+ guidance_scale=guidance_scale,
150
+ negative_prompt=negative_prompt,
151
+ num_images_per_prompt=num_images_per_prompt,
152
+ eta=eta,
153
+ generator=generator,
154
+ output_type=output_type,
155
+ return_dict=return_dict,
156
+ callback=callback,
157
+ )
158
+
159
+ @torch.no_grad()
160
+ def img2img(
161
+ self,
162
+ prompt: Union[str, List[str]],
163
+ image: Union[torch.FloatTensor, PIL.Image.Image],
164
+ strength: float = 0.8,
165
+ num_inference_steps: Optional[int] = 50,
166
+ guidance_scale: Optional[float] = 7.5,
167
+ negative_prompt: Optional[Union[str, List[str]]] = None,
168
+ num_images_per_prompt: Optional[int] = 1,
169
+ eta: Optional[float] = 0.0,
170
+ generator: Optional[torch.Generator] = None,
171
+ output_type: Optional[str] = "pil",
172
+ return_dict: bool = True,
173
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
174
+ callback_steps: int = 1,
175
+ **kwargs,
176
+ ):
177
+ # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline
178
+ return StableDiffusionImg2ImgPipeline(**self.components)(
179
+ prompt=prompt,
180
+ image=image,
181
+ strength=strength,
182
+ num_inference_steps=num_inference_steps,
183
+ guidance_scale=guidance_scale,
184
+ negative_prompt=negative_prompt,
185
+ num_images_per_prompt=num_images_per_prompt,
186
+ eta=eta,
187
+ generator=generator,
188
+ output_type=output_type,
189
+ return_dict=return_dict,
190
+ callback=callback,
191
+ callback_steps=callback_steps,
192
+ )
193
+
194
+ @torch.no_grad()
195
+ def text2img(
196
+ self,
197
+ prompt: Union[str, List[str]],
198
+ height: int = 512,
199
+ width: int = 512,
200
+ num_inference_steps: int = 50,
201
+ guidance_scale: float = 7.5,
202
+ negative_prompt: Optional[Union[str, List[str]]] = None,
203
+ num_images_per_prompt: Optional[int] = 1,
204
+ eta: float = 0.0,
205
+ generator: Optional[torch.Generator] = None,
206
+ latents: Optional[torch.FloatTensor] = None,
207
+ output_type: Optional[str] = "pil",
208
+ return_dict: bool = True,
209
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
210
+ callback_steps: int = 1,
211
+ ):
212
+ # For more information on how this function https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionPipeline
213
+ return StableDiffusionPipeline(**self.components)(
214
+ prompt=prompt,
215
+ height=height,
216
+ width=width,
217
+ num_inference_steps=num_inference_steps,
218
+ guidance_scale=guidance_scale,
219
+ negative_prompt=negative_prompt,
220
+ num_images_per_prompt=num_images_per_prompt,
221
+ eta=eta,
222
+ generator=generator,
223
+ latents=latents,
224
+ output_type=output_type,
225
+ return_dict=return_dict,
226
+ callback=callback,
227
+ callback_steps=callback_steps,
228
+ )
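`StableDiffusionMegaPipeline` simply re-wraps its registered components into the three standard pipelines, so a single set of weights serves text-to-image, image-to-image, and inpainting. A usage sketch, assuming the file is loaded as a `diffusers` community pipeline via the `custom_pipeline` argument (the checkpoint name is an example, and the image URL is the one used in the reference-pipeline docstring below):

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# load this file as a community pipeline (checkpoint name is an example)
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="stable_diffusion_mega",
    torch_dtype=torch.float16,
).to("cuda")

# one pipeline object, three entry points sharing the same components
text_image = pipe.text2img("An astronaut riding a horse").images[0]

init_image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
).resize((512, 512))
edited_image = pipe.img2img(prompt="an oil painting", image=init_image, strength=0.75).images[0]
```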
v0.24.0/stable_diffusion_reference.py ADDED
@@ -0,0 +1,797 @@
1
+ # Inspired by: https://github.com/Mikubill/sd-webui-controlnet/discussions/1236 and https://github.com/Mikubill/sd-webui-controlnet/discussions/1280
2
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+ import torch
7
+
8
+ from diffusers import StableDiffusionPipeline
9
+ from diffusers.models.attention import BasicTransformerBlock
10
+ from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UpBlock2D
11
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
12
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import rescale_noise_cfg
13
+ from diffusers.utils import PIL_INTERPOLATION, logging
14
+ from diffusers.utils.torch_utils import randn_tensor
15
+
16
+
17
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
18
+
19
+ EXAMPLE_DOC_STRING = """
20
+ Examples:
21
+ ```py
22
+ >>> import torch
23
+ >>> from diffusers import UniPCMultistepScheduler
24
+ >>> from diffusers.utils import load_image
25
+
26
+ >>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
27
+
28
+ >>> pipe = StableDiffusionReferencePipeline.from_pretrained(
29
+ "runwayml/stable-diffusion-v1-5",
30
+ safety_checker=None,
31
+ torch_dtype=torch.float16
32
+ ).to('cuda:0')
33
+
34
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
35
+
36
+ >>> result_img = pipe(ref_image=input_image,
37
+ prompt="1girl",
38
+ num_inference_steps=20,
39
+ reference_attn=True,
40
+ reference_adain=True).images[0]
41
+
42
+ >>> result_img.show()
43
+ ```
44
+ """
45
+
46
+
47
+ def torch_dfs(model: torch.nn.Module):
48
+ result = [model]
49
+ for child in model.children():
50
+ result += torch_dfs(child)
51
+ return result
52
+
53
+
54
+ class StableDiffusionReferencePipeline(StableDiffusionPipeline):
55
+ def _default_height_width(self, height, width, image):
56
+ # NOTE: It is possible that a list of images have different
57
+ # dimensions for each image, so just checking the first image
58
+ # is not _exactly_ correct, but it is simple.
59
+ while isinstance(image, list):
60
+ image = image[0]
61
+
62
+ if height is None:
63
+ if isinstance(image, PIL.Image.Image):
64
+ height = image.height
65
+ elif isinstance(image, torch.Tensor):
66
+ height = image.shape[2]
67
+
68
+ height = (height // 8) * 8 # round down to nearest multiple of 8
69
+
70
+ if width is None:
71
+ if isinstance(image, PIL.Image.Image):
72
+ width = image.width
73
+ elif isinstance(image, torch.Tensor):
74
+ width = image.shape[3]
75
+
76
+ width = (width // 8) * 8 # round down to nearest multiple of 8
77
+
78
+ return height, width
79
+
80
+ def prepare_image(
81
+ self,
82
+ image,
83
+ width,
84
+ height,
85
+ batch_size,
86
+ num_images_per_prompt,
87
+ device,
88
+ dtype,
89
+ do_classifier_free_guidance=False,
90
+ guess_mode=False,
91
+ ):
92
+ if not isinstance(image, torch.Tensor):
93
+ if isinstance(image, PIL.Image.Image):
94
+ image = [image]
95
+
96
+ if isinstance(image[0], PIL.Image.Image):
97
+ images = []
98
+
99
+ for image_ in image:
100
+ image_ = image_.convert("RGB")
101
+ image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
102
+ image_ = np.array(image_)
103
+ image_ = image_[None, :]
104
+ images.append(image_)
105
+
106
+ image = images
107
+
108
+ image = np.concatenate(image, axis=0)
109
+ image = np.array(image).astype(np.float32) / 255.0
110
+ image = (image - 0.5) / 0.5
111
+ image = image.transpose(0, 3, 1, 2)
112
+ image = torch.from_numpy(image)
113
+ elif isinstance(image[0], torch.Tensor):
114
+ image = torch.cat(image, dim=0)
115
+
116
+ image_batch_size = image.shape[0]
117
+
118
+ if image_batch_size == 1:
119
+ repeat_by = batch_size
120
+ else:
121
+ # image batch size is the same as prompt batch size
122
+ repeat_by = num_images_per_prompt
123
+
124
+ image = image.repeat_interleave(repeat_by, dim=0)
125
+
126
+ image = image.to(device=device, dtype=dtype)
127
+
128
+ if do_classifier_free_guidance and not guess_mode:
129
+ image = torch.cat([image] * 2)
130
+
131
+ return image
132
+
133
+ def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
134
+ refimage = refimage.to(device=device, dtype=dtype)
135
+
136
+ # encode the mask image into latents space so we can concatenate it to the latents
137
+ if isinstance(generator, list):
138
+ ref_image_latents = [
139
+ self.vae.encode(refimage[i : i + 1]).latent_dist.sample(generator=generator[i])
140
+ for i in range(batch_size)
141
+ ]
142
+ ref_image_latents = torch.cat(ref_image_latents, dim=0)
143
+ else:
144
+ ref_image_latents = self.vae.encode(refimage).latent_dist.sample(generator=generator)
145
+ ref_image_latents = self.vae.config.scaling_factor * ref_image_latents
146
+
147
+ # duplicate mask and ref_image_latents for each generation per prompt, using mps friendly method
148
+ if ref_image_latents.shape[0] < batch_size:
149
+ if not batch_size % ref_image_latents.shape[0] == 0:
150
+ raise ValueError(
151
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
152
+ f" to a total batch size of {batch_size}, but {ref_image_latents.shape[0]} images were passed."
153
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
154
+ )
155
+ ref_image_latents = ref_image_latents.repeat(batch_size // ref_image_latents.shape[0], 1, 1, 1)
156
+
157
+ # aligning device to prevent device errors when concating it with the latent model input
158
+ ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)
159
+ return ref_image_latents
160
+
161
+ @torch.no_grad()
162
+ def __call__(
163
+ self,
164
+ prompt: Union[str, List[str]] = None,
165
+ ref_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
166
+ height: Optional[int] = None,
167
+ width: Optional[int] = None,
168
+ num_inference_steps: int = 50,
169
+ guidance_scale: float = 7.5,
170
+ negative_prompt: Optional[Union[str, List[str]]] = None,
171
+ num_images_per_prompt: Optional[int] = 1,
172
+ eta: float = 0.0,
173
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
174
+ latents: Optional[torch.FloatTensor] = None,
175
+ prompt_embeds: Optional[torch.FloatTensor] = None,
176
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
177
+ output_type: Optional[str] = "pil",
178
+ return_dict: bool = True,
179
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
180
+ callback_steps: int = 1,
181
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
182
+ guidance_rescale: float = 0.0,
183
+ attention_auto_machine_weight: float = 1.0,
184
+ gn_auto_machine_weight: float = 1.0,
185
+ style_fidelity: float = 0.5,
186
+ reference_attn: bool = True,
187
+ reference_adain: bool = True,
188
+ ):
189
+ r"""
190
+ Function invoked when calling the pipeline for generation.
191
+
192
+ Args:
193
+ prompt (`str` or `List[str]`, *optional*):
194
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
195
+ instead.
196
+ ref_image (`torch.FloatTensor`, `PIL.Image.Image`):
197
+ The Reference Control input condition. Reference Control uses this input condition to generate guidance to Unet. If
198
+ the type is specified as `Torch.FloatTensor`, it is passed to Reference Control as is. `PIL.Image.Image` can
199
+ also be accepted as an image.
200
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
201
+ The height in pixels of the generated image.
202
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
203
+ The width in pixels of the generated image.
204
+ num_inference_steps (`int`, *optional*, defaults to 50):
205
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
206
+ expense of slower inference.
207
+ guidance_scale (`float`, *optional*, defaults to 7.5):
208
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
209
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
210
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
211
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
212
+ usually at the expense of lower image quality.
213
+ negative_prompt (`str` or `List[str]`, *optional*):
214
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
215
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
216
+ less than `1`).
217
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
218
+ The number of images to generate per prompt.
219
+ eta (`float`, *optional*, defaults to 0.0):
220
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
221
+ [`schedulers.DDIMScheduler`], will be ignored for others.
222
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
223
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
224
+ to make generation deterministic.
225
+ latents (`torch.FloatTensor`, *optional*):
226
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
227
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
228
+ tensor will ge generated by sampling using the supplied random `generator`.
229
+ prompt_embeds (`torch.FloatTensor`, *optional*):
230
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
231
+ provided, text embeddings will be generated from `prompt` input argument.
232
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
233
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
234
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
235
+ argument.
236
+ output_type (`str`, *optional*, defaults to `"pil"`):
237
+ The output format of the generate image. Choose between
238
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
239
+ return_dict (`bool`, *optional*, defaults to `True`):
240
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
241
+ plain tuple.
242
+ callback (`Callable`, *optional*):
243
+ A function that will be called every `callback_steps` steps during inference. The function will be
244
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
245
+ callback_steps (`int`, *optional*, defaults to 1):
246
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
247
+ called at every step.
248
+ cross_attention_kwargs (`dict`, *optional*):
249
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
250
+ `self.processor` in
251
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
252
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
253
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
254
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
255
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
256
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
257
+ attention_auto_machine_weight (`float`):
258
+ Weight of using reference query for self attention's context.
259
+ If attention_auto_machine_weight=1.0, use reference query for all self attention's context.
260
+ gn_auto_machine_weight (`float`):
261
+ Weight of using reference adain. If gn_auto_machine_weight=2.0, use all reference adain plugins.
262
+ style_fidelity (`float`):
263
+ style fidelity of ref_uncond_xt. If style_fidelity=1.0, control more important,
264
+ elif style_fidelity=0.0, prompt more important, else balanced.
265
+ reference_attn (`bool`):
266
+ Whether to use reference query for self attention's context.
267
+ reference_adain (`bool`):
268
+ Whether to use reference adain.
269
+
270
+ Examples:
271
+
272
+ Returns:
273
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
274
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
275
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
276
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
277
+ (nsfw) content, according to the `safety_checker`.
278
+ """
279
+ assert reference_attn or reference_adain, "`reference_attn` or `reference_adain` must be True."
280
+
281
+ # 0. Default height and width to unet
282
+ height, width = self._default_height_width(height, width, ref_image)
283
+
284
+ # 1. Check inputs. Raise error if not correct
285
+ self.check_inputs(
286
+ prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
287
+ )
288
+
289
+ # 2. Define call parameters
290
+ if prompt is not None and isinstance(prompt, str):
291
+ batch_size = 1
292
+ elif prompt is not None and isinstance(prompt, list):
293
+ batch_size = len(prompt)
294
+ else:
295
+ batch_size = prompt_embeds.shape[0]
296
+
297
+ device = self._execution_device
298
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
299
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
300
+ # corresponds to doing no classifier free guidance.
301
+ do_classifier_free_guidance = guidance_scale > 1.0
302
+
303
+ # 3. Encode input prompt
304
+ text_encoder_lora_scale = (
305
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
306
+ )
307
+ prompt_embeds = self._encode_prompt(
308
+ prompt,
309
+ device,
310
+ num_images_per_prompt,
311
+ do_classifier_free_guidance,
312
+ negative_prompt,
313
+ prompt_embeds=prompt_embeds,
314
+ negative_prompt_embeds=negative_prompt_embeds,
315
+ lora_scale=text_encoder_lora_scale,
316
+ )
317
+
318
+ # 4. Preprocess reference image
319
+ ref_image = self.prepare_image(
320
+ image=ref_image,
321
+ width=width,
322
+ height=height,
323
+ batch_size=batch_size * num_images_per_prompt,
324
+ num_images_per_prompt=num_images_per_prompt,
325
+ device=device,
326
+ dtype=prompt_embeds.dtype,
327
+ )
328
+
329
+ # 5. Prepare timesteps
330
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
331
+ timesteps = self.scheduler.timesteps
332
+
333
+ # 6. Prepare latent variables
334
+ num_channels_latents = self.unet.config.in_channels
335
+ latents = self.prepare_latents(
336
+ batch_size * num_images_per_prompt,
337
+ num_channels_latents,
338
+ height,
339
+ width,
340
+ prompt_embeds.dtype,
341
+ device,
342
+ generator,
343
+ latents,
344
+ )
345
+
346
+ # 7. Prepare reference latent variables
347
+ ref_image_latents = self.prepare_ref_latents(
348
+ ref_image,
349
+ batch_size * num_images_per_prompt,
350
+ prompt_embeds.dtype,
351
+ device,
352
+ generator,
353
+ do_classifier_free_guidance,
354
+ )
355
+
356
+ # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
357
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
358
+
359
+ # 9. Modify self attention and group norm
360
+ MODE = "write"
361
+ uc_mask = (
362
+ torch.Tensor([1] * batch_size * num_images_per_prompt + [0] * batch_size * num_images_per_prompt)
363
+ .type_as(ref_image_latents)
364
+ .bool()
365
+ )
366
+
367
+ def hacked_basic_transformer_inner_forward(
368
+ self,
369
+ hidden_states: torch.FloatTensor,
370
+ attention_mask: Optional[torch.FloatTensor] = None,
371
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
372
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
373
+ timestep: Optional[torch.LongTensor] = None,
374
+ cross_attention_kwargs: Dict[str, Any] = None,
375
+ class_labels: Optional[torch.LongTensor] = None,
376
+ ):
377
+ if self.use_ada_layer_norm:
378
+ norm_hidden_states = self.norm1(hidden_states, timestep)
379
+ elif self.use_ada_layer_norm_zero:
380
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
381
+ hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
382
+ )
383
+ else:
384
+ norm_hidden_states = self.norm1(hidden_states)
385
+
386
+ # 1. Self-Attention
387
+ cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
388
+ if self.only_cross_attention:
389
+ attn_output = self.attn1(
390
+ norm_hidden_states,
391
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
392
+ attention_mask=attention_mask,
393
+ **cross_attention_kwargs,
394
+ )
395
+ else:
396
+ if MODE == "write":
397
+ self.bank.append(norm_hidden_states.detach().clone())
398
+ attn_output = self.attn1(
399
+ norm_hidden_states,
400
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
401
+ attention_mask=attention_mask,
402
+ **cross_attention_kwargs,
403
+ )
404
+ if MODE == "read":
405
+ if attention_auto_machine_weight > self.attn_weight:
406
+ attn_output_uc = self.attn1(
407
+ norm_hidden_states,
408
+ encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),
409
+ # attention_mask=attention_mask,
410
+ **cross_attention_kwargs,
411
+ )
412
+ attn_output_c = attn_output_uc.clone()
413
+ if do_classifier_free_guidance and style_fidelity > 0:
414
+ attn_output_c[uc_mask] = self.attn1(
415
+ norm_hidden_states[uc_mask],
416
+ encoder_hidden_states=norm_hidden_states[uc_mask],
417
+ **cross_attention_kwargs,
418
+ )
419
+ attn_output = style_fidelity * attn_output_c + (1.0 - style_fidelity) * attn_output_uc
420
+ self.bank.clear()
421
+ else:
422
+ attn_output = self.attn1(
423
+ norm_hidden_states,
424
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
425
+ attention_mask=attention_mask,
426
+ **cross_attention_kwargs,
427
+ )
428
+ if self.use_ada_layer_norm_zero:
429
+ attn_output = gate_msa.unsqueeze(1) * attn_output
430
+ hidden_states = attn_output + hidden_states
431
+
432
+ if self.attn2 is not None:
433
+ norm_hidden_states = (
434
+ self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
435
+ )
436
+
437
+ # 2. Cross-Attention
438
+ attn_output = self.attn2(
439
+ norm_hidden_states,
440
+ encoder_hidden_states=encoder_hidden_states,
441
+ attention_mask=encoder_attention_mask,
442
+ **cross_attention_kwargs,
443
+ )
444
+ hidden_states = attn_output + hidden_states
445
+
446
+ # 3. Feed-forward
447
+ norm_hidden_states = self.norm3(hidden_states)
448
+
449
+ if self.use_ada_layer_norm_zero:
450
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
451
+
452
+ ff_output = self.ff(norm_hidden_states)
453
+
454
+ if self.use_ada_layer_norm_zero:
455
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
456
+
457
+ hidden_states = ff_output + hidden_states
458
+
459
+ return hidden_states
460
+
461
+ def hacked_mid_forward(self, *args, **kwargs):
462
+ eps = 1e-6
463
+ x = self.original_forward(*args, **kwargs)
464
+ if MODE == "write":
465
+ if gn_auto_machine_weight >= self.gn_weight:
466
+ var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
467
+ self.mean_bank.append(mean)
468
+ self.var_bank.append(var)
469
+ if MODE == "read":
470
+ if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
471
+ var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
472
+ std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
473
+ mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))
474
+ var_acc = sum(self.var_bank) / float(len(self.var_bank))
475
+ std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
476
+ x_uc = (((x - mean) / std) * std_acc) + mean_acc
477
+ x_c = x_uc.clone()
478
+ if do_classifier_free_guidance and style_fidelity > 0:
479
+ x_c[uc_mask] = x[uc_mask]
480
+ x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc
481
+ self.mean_bank = []
482
+ self.var_bank = []
483
+ return x
484
+
485
+ def hack_CrossAttnDownBlock2D_forward(
486
+ self,
487
+ hidden_states: torch.FloatTensor,
488
+ temb: Optional[torch.FloatTensor] = None,
489
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
490
+ attention_mask: Optional[torch.FloatTensor] = None,
491
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
492
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
493
+ ):
494
+ eps = 1e-6
495
+
496
+ # TODO(Patrick, William) - attention mask is not used
497
+ output_states = ()
498
+
499
+ for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
500
+ hidden_states = resnet(hidden_states, temb)
501
+ hidden_states = attn(
502
+ hidden_states,
503
+ encoder_hidden_states=encoder_hidden_states,
504
+ cross_attention_kwargs=cross_attention_kwargs,
505
+ attention_mask=attention_mask,
506
+ encoder_attention_mask=encoder_attention_mask,
507
+ return_dict=False,
508
+ )[0]
509
+ if MODE == "write":
510
+ if gn_auto_machine_weight >= self.gn_weight:
511
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
512
+ self.mean_bank.append([mean])
513
+ self.var_bank.append([var])
514
+ if MODE == "read":
515
+ if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
516
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
517
+ std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
518
+ mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
519
+ var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
520
+ std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
521
+ hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
522
+ hidden_states_c = hidden_states_uc.clone()
523
+ if do_classifier_free_guidance and style_fidelity > 0:
524
+ hidden_states_c[uc_mask] = hidden_states[uc_mask]
525
+ hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
526
+
527
+ output_states = output_states + (hidden_states,)
528
+
529
+ if MODE == "read":
530
+ self.mean_bank = []
531
+ self.var_bank = []
532
+
533
+ if self.downsamplers is not None:
534
+ for downsampler in self.downsamplers:
535
+ hidden_states = downsampler(hidden_states)
536
+
537
+ output_states = output_states + (hidden_states,)
538
+
539
+ return hidden_states, output_states
540
+
541
+ def hacked_DownBlock2D_forward(self, hidden_states, temb=None):
542
+ eps = 1e-6
543
+
544
+ output_states = ()
545
+
546
+ for i, resnet in enumerate(self.resnets):
547
+ hidden_states = resnet(hidden_states, temb)
548
+
549
+ if MODE == "write":
550
+ if gn_auto_machine_weight >= self.gn_weight:
551
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
552
+ self.mean_bank.append([mean])
553
+ self.var_bank.append([var])
554
+ if MODE == "read":
555
+ if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
556
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
557
+ std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
558
+ mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
559
+ var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
560
+ std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
561
+ hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
562
+ hidden_states_c = hidden_states_uc.clone()
563
+ if do_classifier_free_guidance and style_fidelity > 0:
564
+ hidden_states_c[uc_mask] = hidden_states[uc_mask]
565
+ hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
566
+
567
+ output_states = output_states + (hidden_states,)
568
+
569
+ if MODE == "read":
570
+ self.mean_bank = []
571
+ self.var_bank = []
572
+
573
+ if self.downsamplers is not None:
574
+ for downsampler in self.downsamplers:
575
+ hidden_states = downsampler(hidden_states)
576
+
577
+ output_states = output_states + (hidden_states,)
578
+
579
+ return hidden_states, output_states
580
+
581
+ def hacked_CrossAttnUpBlock2D_forward(
582
+ self,
583
+ hidden_states: torch.FloatTensor,
584
+ res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
585
+ temb: Optional[torch.FloatTensor] = None,
586
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
587
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
588
+ upsample_size: Optional[int] = None,
589
+ attention_mask: Optional[torch.FloatTensor] = None,
590
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
591
+ ):
592
+ eps = 1e-6
593
+ # TODO(Patrick, William) - attention mask is not used
594
+ for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
595
+ # pop res hidden states
596
+ res_hidden_states = res_hidden_states_tuple[-1]
597
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
598
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
599
+ hidden_states = resnet(hidden_states, temb)
600
+ hidden_states = attn(
601
+ hidden_states,
602
+ encoder_hidden_states=encoder_hidden_states,
603
+ cross_attention_kwargs=cross_attention_kwargs,
604
+ attention_mask=attention_mask,
605
+ encoder_attention_mask=encoder_attention_mask,
606
+ return_dict=False,
607
+ )[0]
608
+
609
+ if MODE == "write":
610
+ if gn_auto_machine_weight >= self.gn_weight:
611
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
612
+ self.mean_bank.append([mean])
613
+ self.var_bank.append([var])
614
+ if MODE == "read":
615
+ if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
616
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
617
+ std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
618
+ mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
619
+ var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
620
+ std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
621
+ hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
622
+ hidden_states_c = hidden_states_uc.clone()
623
+ if do_classifier_free_guidance and style_fidelity > 0:
624
+ hidden_states_c[uc_mask] = hidden_states[uc_mask]
625
+ hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
626
+
627
+ if MODE == "read":
628
+ self.mean_bank = []
629
+ self.var_bank = []
630
+
631
+ if self.upsamplers is not None:
632
+ for upsampler in self.upsamplers:
633
+ hidden_states = upsampler(hidden_states, upsample_size)
634
+
635
+ return hidden_states
636
+
637
+ def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
638
+ eps = 1e-6
639
+ for i, resnet in enumerate(self.resnets):
640
+ # pop res hidden states
641
+ res_hidden_states = res_hidden_states_tuple[-1]
642
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
643
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
644
+ hidden_states = resnet(hidden_states, temb)
645
+
646
+ if MODE == "write":
647
+ if gn_auto_machine_weight >= self.gn_weight:
648
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
649
+ self.mean_bank.append([mean])
650
+ self.var_bank.append([var])
651
+ if MODE == "read":
652
+ if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
653
+ var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
654
+ std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
655
+ mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
656
+ var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
657
+ std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
658
+ hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
659
+ hidden_states_c = hidden_states_uc.clone()
660
+ if do_classifier_free_guidance and style_fidelity > 0:
661
+ hidden_states_c[uc_mask] = hidden_states[uc_mask]
662
+ hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
663
+
664
+ if MODE == "read":
665
+ self.mean_bank = []
666
+ self.var_bank = []
667
+
668
+ if self.upsamplers is not None:
669
+ for upsampler in self.upsamplers:
670
+ hidden_states = upsampler(hidden_states, upsample_size)
671
+
672
+ return hidden_states
673
+
674
+ if reference_attn:
675
+ attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock)]
676
+ attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])
677
+
678
+ for i, module in enumerate(attn_modules):
679
+ module._original_inner_forward = module.forward
680
+ module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)
681
+ module.bank = []
682
+ module.attn_weight = float(i) / float(len(attn_modules))
683
+
684
+ if reference_adain:
685
+ gn_modules = [self.unet.mid_block]
686
+ self.unet.mid_block.gn_weight = 0
687
+
688
+ down_blocks = self.unet.down_blocks
689
+ for w, module in enumerate(down_blocks):
690
+ module.gn_weight = 1.0 - float(w) / float(len(down_blocks))
691
+ gn_modules.append(module)
692
+
693
+ up_blocks = self.unet.up_blocks
694
+ for w, module in enumerate(up_blocks):
695
+ module.gn_weight = float(w) / float(len(up_blocks))
696
+ gn_modules.append(module)
697
+
698
+ for i, module in enumerate(gn_modules):
699
+ if getattr(module, "original_forward", None) is None:
700
+ module.original_forward = module.forward
701
+ if i == 0:
702
+ # mid_block
703
+ module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)
704
+ elif isinstance(module, CrossAttnDownBlock2D):
705
+ module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)
706
+ elif isinstance(module, DownBlock2D):
707
+ module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)
708
+ elif isinstance(module, CrossAttnUpBlock2D):
709
+ module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)
710
+ elif isinstance(module, UpBlock2D):
711
+ module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)
712
+ module.mean_bank = []
713
+ module.var_bank = []
714
+ module.gn_weight *= 2
715
+
716
+ # 10. Denoising loop
717
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
718
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
719
+ for i, t in enumerate(timesteps):
720
+ # expand the latents if we are doing classifier free guidance
721
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
722
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
723
+
724
+ # ref only part
725
+ noise = randn_tensor(
726
+ ref_image_latents.shape, generator=generator, device=device, dtype=ref_image_latents.dtype
727
+ )
728
+ ref_xt = self.scheduler.add_noise(
729
+ ref_image_latents,
730
+ noise,
731
+ t.reshape(
732
+ 1,
733
+ ),
734
+ )
735
+ ref_xt = torch.cat([ref_xt] * 2) if do_classifier_free_guidance else ref_xt
736
+ ref_xt = self.scheduler.scale_model_input(ref_xt, t)
737
+
738
+ MODE = "write"
739
+ self.unet(
740
+ ref_xt,
741
+ t,
742
+ encoder_hidden_states=prompt_embeds,
743
+ cross_attention_kwargs=cross_attention_kwargs,
744
+ return_dict=False,
745
+ )
746
+
747
+ # predict the noise residual
748
+ MODE = "read"
749
+ noise_pred = self.unet(
750
+ latent_model_input,
751
+ t,
752
+ encoder_hidden_states=prompt_embeds,
753
+ cross_attention_kwargs=cross_attention_kwargs,
754
+ return_dict=False,
755
+ )[0]
756
+
757
+ # perform guidance
758
+ if do_classifier_free_guidance:
759
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
760
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
761
+
762
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
763
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
764
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
765
+
766
+ # compute the previous noisy sample x_t -> x_t-1
767
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
768
+
769
+ # call the callback, if provided
770
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
771
+ progress_bar.update()
772
+ if callback is not None and i % callback_steps == 0:
773
+ step_idx = i // getattr(self.scheduler, "order", 1)
774
+ callback(step_idx, t, latents)
775
+
776
+ if not output_type == "latent":
777
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
778
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
779
+ else:
780
+ image = latents
781
+ has_nsfw_concept = None
782
+
783
+ if has_nsfw_concept is None:
784
+ do_denormalize = [True] * image.shape[0]
785
+ else:
786
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
787
+
788
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
789
+
790
+ # Offload last model to CPU
791
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
792
+ self.final_offload_hook.offload()
793
+
794
+ if not return_dict:
795
+ return (image, has_nsfw_concept)
796
+
797
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
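The block above finishes wiring the reference-only trick into the UNet: each group-norm-hooked block gets a `gn_weight` that decays from 1 to 0 across the down blocks, grows from 0 to 1 across the up blocks, and is then doubled, while each denoising step first runs the UNet in "write" mode on the noised reference latents and then in "read" mode on the actual latents. A minimal sketch of that weight schedule, with illustrative block counts (4 down and 4 up blocks, typical of SD 1.x UNets):

```py
# Illustrative only: recompute the gn_weight schedule assigned above,
# assuming 4 down blocks and 4 up blocks.
num_down, num_up = 4, 4
gn_weights = [0.0]                                           # mid block
gn_weights += [1.0 - w / num_down for w in range(num_down)]  # 1.0, 0.75, 0.5, 0.25
gn_weights += [w / num_up for w in range(num_up)]            # 0.0, 0.25, 0.5, 0.75
gn_weights = [2 * g for g in gn_weights]                     # `module.gn_weight *= 2` above
print(gn_weights)
```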
v0.24.0/stable_diffusion_repaint.py ADDED
@@ -0,0 +1,958 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Callable, List, Optional, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ from packaging import version
22
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
23
+
24
+ from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel
25
+ from diffusers.configuration_utils import FrozenDict, deprecate
26
+ from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
27
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
28
+ from diffusers.pipelines.stable_diffusion.safety_checker import (
29
+ StableDiffusionSafetyChecker,
30
+ )
31
+ from diffusers.schedulers import KarrasDiffusionSchedulers
32
+ from diffusers.utils import (
33
+ is_accelerate_available,
34
+ is_accelerate_version,
35
+ logging,
36
+ )
37
+ from diffusers.utils.torch_utils import randn_tensor
38
+
39
+
40
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
41
+
42
+
43
+ def prepare_mask_and_masked_image(image, mask):
44
+ """
45
+ Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
46
+ converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
47
+ ``image`` and ``1`` for the ``mask``.
48
+ The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
49
+ binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
50
+ Args:
51
+ image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
52
+ It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
53
+ ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
54
+ mask (Union[np.array, PIL.Image, torch.Tensor]): The mask to apply to the image, i.e. regions to inpaint.
55
+ It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
56
+ ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
57
+ Raises:
58
+ ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
59
+ should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
60
+ TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
61
+ (or the other way around).
62
+ Returns:
63
+ tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
64
+ dimensions: ``batch x channels x height x width``.
65
+ """
66
+ if isinstance(image, torch.Tensor):
67
+ if not isinstance(mask, torch.Tensor):
68
+ raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")
69
+
70
+ # Batch single image
71
+ if image.ndim == 3:
72
+ assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
73
+ image = image.unsqueeze(0)
74
+
75
+ # Batch and add channel dim for single mask
76
+ if mask.ndim == 2:
77
+ mask = mask.unsqueeze(0).unsqueeze(0)
78
+
79
+ # Batch single mask or add channel dim
80
+ if mask.ndim == 3:
81
+ # Single batched mask, no channel dim or single mask not batched but channel dim
82
+ if mask.shape[0] == 1:
83
+ mask = mask.unsqueeze(0)
84
+
85
+ # Batched masks no channel dim
86
+ else:
87
+ mask = mask.unsqueeze(1)
88
+
89
+ assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
90
+ assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
91
+ assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
92
+
93
+ # Check image is in [-1, 1]
94
+ if image.min() < -1 or image.max() > 1:
95
+ raise ValueError("Image should be in [-1, 1] range")
96
+
97
+ # Check mask is in [0, 1]
98
+ if mask.min() < 0 or mask.max() > 1:
99
+ raise ValueError("Mask should be in [0, 1] range")
100
+
101
+ # Binarize mask
102
+ mask[mask < 0.5] = 0
103
+ mask[mask >= 0.5] = 1
104
+
105
+ # Image as float32
106
+ image = image.to(dtype=torch.float32)
107
+ elif isinstance(mask, torch.Tensor):
108
+ raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
109
+ else:
110
+ # preprocess image
111
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
112
+ image = [image]
113
+
114
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
115
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
116
+ image = np.concatenate(image, axis=0)
117
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
118
+ image = np.concatenate([i[None, :] for i in image], axis=0)
119
+
120
+ image = image.transpose(0, 3, 1, 2)
121
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
122
+
123
+ # preprocess mask
124
+ if isinstance(mask, (PIL.Image.Image, np.ndarray)):
125
+ mask = [mask]
126
+
127
+ if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
128
+ mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
129
+ mask = mask.astype(np.float32) / 255.0
130
+ elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
131
+ mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
132
+
133
+ mask[mask < 0.5] = 0
134
+ mask[mask >= 0.5] = 1
135
+ mask = torch.from_numpy(mask)
136
+
137
+ # masked_image = image * (mask >= 0.5)
138
+ masked_image = image
139
+
140
+ return mask, masked_image
141
+
142
+
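A minimal usage sketch of `prepare_mask_and_masked_image` above, assuming 512x512 PIL inputs (the file names are hypothetical). Note that, unlike the regular inpainting helper, this repaint variant returns the full image rather than a masked copy:

```py
# Hypothetical usage of prepare_mask_and_masked_image with PIL inputs.
import PIL.Image

init_image = PIL.Image.open("input.png").convert("RGB").resize((512, 512))
mask_image = PIL.Image.open("mask.png").convert("L").resize((512, 512))

mask, masked_image = prepare_mask_and_masked_image(init_image, mask_image)
# mask:         torch.float32, shape (1, 1, 512, 512), binarized to {0, 1}
# masked_image: torch.float32, shape (1, 3, 512, 512), rescaled to [-1, 1]
```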
143
+ class StableDiffusionRepaintPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
144
+ r"""
145
+ Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*.
146
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
147
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
148
+ In addition the pipeline inherits the following loading methods:
149
+ - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
150
+ - *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
151
+ as well as the following saving methods:
152
+ - *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
153
+ Args:
154
+ vae ([`AutoencoderKL`]):
155
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
156
+ text_encoder ([`CLIPTextModel`]):
157
+ Frozen text-encoder. Stable Diffusion uses the text portion of
158
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
159
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
160
+ tokenizer (`CLIPTokenizer`):
161
+ Tokenizer of class
162
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
163
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
164
+ scheduler ([`SchedulerMixin`]):
165
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
166
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
167
+ safety_checker ([`StableDiffusionSafetyChecker`]):
168
+ Classification module that estimates whether generated images could be considered offensive or harmful.
169
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
170
+ feature_extractor ([`CLIPImageProcessor`]):
171
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
172
+ """
173
+
174
+ _optional_components = ["safety_checker", "feature_extractor"]
175
+
176
+ def __init__(
177
+ self,
178
+ vae: AutoencoderKL,
179
+ text_encoder: CLIPTextModel,
180
+ tokenizer: CLIPTokenizer,
181
+ unet: UNet2DConditionModel,
182
+ scheduler: KarrasDiffusionSchedulers,
183
+ safety_checker: StableDiffusionSafetyChecker,
184
+ feature_extractor: CLIPImageProcessor,
185
+ requires_safety_checker: bool = True,
186
+ ):
187
+ super().__init__()
188
+
189
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
190
+ deprecation_message = (
191
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
192
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
193
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
194
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
195
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
196
+ " file"
197
+ )
198
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
199
+ new_config = dict(scheduler.config)
200
+ new_config["steps_offset"] = 1
201
+ scheduler._internal_dict = FrozenDict(new_config)
202
+
203
+ if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
204
+ deprecation_message = (
205
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration"
206
+ " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
207
+ " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
208
+ " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
209
+ " Hub, it would be very nice if you could open a Pull request for the"
210
+ " `scheduler/scheduler_config.json` file"
211
+ )
212
+ deprecate(
213
+ "skip_prk_steps not set",
214
+ "1.0.0",
215
+ deprecation_message,
216
+ standard_warn=False,
217
+ )
218
+ new_config = dict(scheduler.config)
219
+ new_config["skip_prk_steps"] = True
220
+ scheduler._internal_dict = FrozenDict(new_config)
221
+
222
+ if safety_checker is None and requires_safety_checker:
223
+ logger.warning(
224
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
225
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
226
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
227
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
228
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
229
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
230
+ )
231
+
232
+ if safety_checker is not None and feature_extractor is None:
233
+ raise ValueError(
234
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
235
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
236
+ )
237
+
238
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
239
+ version.parse(unet.config._diffusers_version).base_version
240
+ ) < version.parse("0.9.0.dev0")
241
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
242
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
243
+ deprecation_message = (
244
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
245
+ " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
246
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
247
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
248
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
249
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
250
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
251
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
252
+ " the `unet/config.json` file"
253
+ )
254
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
255
+ new_config = dict(unet.config)
256
+ new_config["sample_size"] = 64
257
+ unet._internal_dict = FrozenDict(new_config)
258
+ # Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4
259
+ if unet.config.in_channels != 4:
260
+ logger.warning(
261
+ f"You have loaded a UNet with {unet.config.in_channels} input channels, whereas by default,"
262
+ f" {self.__class__} assumes that `pipeline.unet` has 4 input channels: 4 for `num_channels_latents`,"
263
+ ". If you did not intend to modify"
264
+ " this behavior, please check whether you have loaded the right checkpoint."
265
+ )
266
+
267
+ self.register_modules(
268
+ vae=vae,
269
+ text_encoder=text_encoder,
270
+ tokenizer=tokenizer,
271
+ unet=unet,
272
+ scheduler=scheduler,
273
+ safety_checker=safety_checker,
274
+ feature_extractor=feature_extractor,
275
+ )
276
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
277
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
278
+
279
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
280
+ def enable_sequential_cpu_offload(self, gpu_id=0):
281
+ r"""
282
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
283
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
284
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
285
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
286
+ `enable_model_cpu_offload`, but performance is lower.
287
+ """
288
+ if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
289
+ from accelerate import cpu_offload
290
+ else:
291
+ raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
292
+
293
+ device = torch.device(f"cuda:{gpu_id}")
294
+
295
+ if self.device.type != "cpu":
296
+ self.to("cpu", silence_dtype_warnings=True)
297
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
298
+
299
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
300
+ cpu_offload(cpu_offloaded_model, device)
301
+
302
+ if self.safety_checker is not None:
303
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
304
+
305
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
306
+ def enable_model_cpu_offload(self, gpu_id=0):
307
+ r"""
308
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
309
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
310
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
311
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
312
+ """
313
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
314
+ from accelerate import cpu_offload_with_hook
315
+ else:
316
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
317
+
318
+ device = torch.device(f"cuda:{gpu_id}")
319
+
320
+ if self.device.type != "cpu":
321
+ self.to("cpu", silence_dtype_warnings=True)
322
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
323
+
324
+ hook = None
325
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
326
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
327
+
328
+ if self.safety_checker is not None:
329
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
330
+
331
+ # We'll offload the last model manually.
332
+ self.final_offload_hook = hook
333
+
334
+ @property
335
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
336
+ def _execution_device(self):
337
+ r"""
338
+ Returns the device on which the pipeline's models will be executed. After calling
339
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
340
+ hooks.
341
+ """
342
+ if not hasattr(self.unet, "_hf_hook"):
343
+ return self.device
344
+ for module in self.unet.modules():
345
+ if (
346
+ hasattr(module, "_hf_hook")
347
+ and hasattr(module._hf_hook, "execution_device")
348
+ and module._hf_hook.execution_device is not None
349
+ ):
350
+ return torch.device(module._hf_hook.execution_device)
351
+ return self.device
352
+
353
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
354
+ def _encode_prompt(
355
+ self,
356
+ prompt,
357
+ device,
358
+ num_images_per_prompt,
359
+ do_classifier_free_guidance,
360
+ negative_prompt=None,
361
+ prompt_embeds: Optional[torch.FloatTensor] = None,
362
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
363
+ ):
364
+ r"""
365
+ Encodes the prompt into text encoder hidden states.
366
+ Args:
367
+ prompt (`str` or `List[str]`, *optional*):
368
+ prompt to be encoded
369
+ device: (`torch.device`):
370
+ torch device
371
+ num_images_per_prompt (`int`):
372
+ number of images that should be generated per prompt
373
+ do_classifier_free_guidance (`bool`):
374
+ whether to use classifier free guidance or not
375
+ negative_prompt (`str` or `List[str]`, *optional*):
376
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
377
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
378
+ less than `1`).
379
+ prompt_embeds (`torch.FloatTensor`, *optional*):
380
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
381
+ provided, text embeddings will be generated from `prompt` input argument.
382
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
383
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
384
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
385
+ argument.
386
+ """
387
+ if prompt is not None and isinstance(prompt, str):
388
+ batch_size = 1
389
+ elif prompt is not None and isinstance(prompt, list):
390
+ batch_size = len(prompt)
391
+ else:
392
+ batch_size = prompt_embeds.shape[0]
393
+
394
+ if prompt_embeds is None:
395
+ # textual inversion: procecss multi-vector tokens if necessary
396
+ if isinstance(self, TextualInversionLoaderMixin):
397
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
398
+
399
+ text_inputs = self.tokenizer(
400
+ prompt,
401
+ padding="max_length",
402
+ max_length=self.tokenizer.model_max_length,
403
+ truncation=True,
404
+ return_tensors="pt",
405
+ )
406
+ text_input_ids = text_inputs.input_ids
407
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
408
+
409
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
410
+ text_input_ids, untruncated_ids
411
+ ):
412
+ removed_text = self.tokenizer.batch_decode(
413
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
414
+ )
415
+ logger.warning(
416
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
417
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
418
+ )
419
+
420
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
421
+ attention_mask = text_inputs.attention_mask.to(device)
422
+ else:
423
+ attention_mask = None
424
+
425
+ prompt_embeds = self.text_encoder(
426
+ text_input_ids.to(device),
427
+ attention_mask=attention_mask,
428
+ )
429
+ prompt_embeds = prompt_embeds[0]
430
+
431
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
432
+
433
+ bs_embed, seq_len, _ = prompt_embeds.shape
434
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
435
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
436
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
437
+
438
+ # get unconditional embeddings for classifier free guidance
439
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
440
+ uncond_tokens: List[str]
441
+ if negative_prompt is None:
442
+ uncond_tokens = [""] * batch_size
443
+ elif type(prompt) is not type(negative_prompt):
444
+ raise TypeError(
445
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
446
+ f" {type(prompt)}."
447
+ )
448
+ elif isinstance(negative_prompt, str):
449
+ uncond_tokens = [negative_prompt]
450
+ elif batch_size != len(negative_prompt):
451
+ raise ValueError(
452
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
453
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
454
+ " the batch size of `prompt`."
455
+ )
456
+ else:
457
+ uncond_tokens = negative_prompt
458
+
459
+ # textual inversion: procecss multi-vector tokens if necessary
460
+ if isinstance(self, TextualInversionLoaderMixin):
461
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
462
+
463
+ max_length = prompt_embeds.shape[1]
464
+ uncond_input = self.tokenizer(
465
+ uncond_tokens,
466
+ padding="max_length",
467
+ max_length=max_length,
468
+ truncation=True,
469
+ return_tensors="pt",
470
+ )
471
+
472
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
473
+ attention_mask = uncond_input.attention_mask.to(device)
474
+ else:
475
+ attention_mask = None
476
+
477
+ negative_prompt_embeds = self.text_encoder(
478
+ uncond_input.input_ids.to(device),
479
+ attention_mask=attention_mask,
480
+ )
481
+ negative_prompt_embeds = negative_prompt_embeds[0]
482
+
483
+ if do_classifier_free_guidance:
484
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
485
+ seq_len = negative_prompt_embeds.shape[1]
486
+
487
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
488
+
489
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
490
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
491
+
492
+ # For classifier free guidance, we need to do two forward passes.
493
+ # Here we concatenate the unconditional and text embeddings into a single batch
494
+ # to avoid doing two forward passes
495
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
496
+
497
+ return prompt_embeds
498
+
499
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
500
+ def run_safety_checker(self, image, device, dtype):
501
+ if self.safety_checker is not None:
502
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
503
+ image, has_nsfw_concept = self.safety_checker(
504
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
505
+ )
506
+ else:
507
+ has_nsfw_concept = None
508
+ return image, has_nsfw_concept
509
+
510
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
511
+ def prepare_extra_step_kwargs(self, generator, eta):
512
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
513
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
514
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
515
+ # and should be between [0, 1]
516
+
517
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
518
+ extra_step_kwargs = {}
519
+ if accepts_eta:
520
+ extra_step_kwargs["eta"] = eta
521
+
522
+ # check if the scheduler accepts generator
523
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
524
+ if accepts_generator:
525
+ extra_step_kwargs["generator"] = generator
526
+ return extra_step_kwargs
527
+
528
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
529
+ def decode_latents(self, latents):
530
+ latents = 1 / self.vae.config.scaling_factor * latents
531
+ image = self.vae.decode(latents).sample
532
+ image = (image / 2 + 0.5).clamp(0, 1)
533
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
534
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
535
+ return image
536
+
537
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
538
+ def check_inputs(
539
+ self,
540
+ prompt,
541
+ height,
542
+ width,
543
+ callback_steps,
544
+ negative_prompt=None,
545
+ prompt_embeds=None,
546
+ negative_prompt_embeds=None,
547
+ ):
548
+ if height % 8 != 0 or width % 8 != 0:
549
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
550
+
551
+ if (callback_steps is None) or (
552
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
553
+ ):
554
+ raise ValueError(
555
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
556
+ f" {type(callback_steps)}."
557
+ )
558
+
559
+ if prompt is not None and prompt_embeds is not None:
560
+ raise ValueError(
561
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
562
+ " only forward one of the two."
563
+ )
564
+ elif prompt is None and prompt_embeds is None:
565
+ raise ValueError(
566
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
567
+ )
568
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
569
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
570
+
571
+ if negative_prompt is not None and negative_prompt_embeds is not None:
572
+ raise ValueError(
573
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
574
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
575
+ )
576
+
577
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
578
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
579
+ raise ValueError(
580
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
581
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
582
+ f" {negative_prompt_embeds.shape}."
583
+ )
584
+
585
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
586
+ def prepare_latents(
587
+ self,
588
+ batch_size,
589
+ num_channels_latents,
590
+ height,
591
+ width,
592
+ dtype,
593
+ device,
594
+ generator,
595
+ latents=None,
596
+ ):
597
+ shape = (
598
+ batch_size,
599
+ num_channels_latents,
600
+ height // self.vae_scale_factor,
601
+ width // self.vae_scale_factor,
602
+ )
603
+ if isinstance(generator, list) and len(generator) != batch_size:
604
+ raise ValueError(
605
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
606
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
607
+ )
608
+
609
+ if latents is None:
610
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
611
+ else:
612
+ latents = latents.to(device)
613
+
614
+ # scale the initial noise by the standard deviation required by the scheduler
615
+ latents = latents * self.scheduler.init_noise_sigma
616
+ return latents
617
+
618
+ def prepare_mask_latents(
619
+ self,
620
+ mask,
621
+ masked_image,
622
+ batch_size,
623
+ height,
624
+ width,
625
+ dtype,
626
+ device,
627
+ generator,
628
+ do_classifier_free_guidance,
629
+ ):
630
+ # resize the mask to latents shape as we concatenate the mask to the latents
631
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
632
+ # and half precision
633
+ mask = torch.nn.functional.interpolate(
634
+ mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
635
+ )
636
+ mask = mask.to(device=device, dtype=dtype)
637
+
638
+ masked_image = masked_image.to(device=device, dtype=dtype)
639
+
640
+ # encode the mask image into latents space so we can concatenate it to the latents
641
+ if isinstance(generator, list):
642
+ masked_image_latents = [
643
+ self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
644
+ for i in range(batch_size)
645
+ ]
646
+ masked_image_latents = torch.cat(masked_image_latents, dim=0)
647
+ else:
648
+ masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
649
+ masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
650
+
651
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
652
+ if mask.shape[0] < batch_size:
653
+ if not batch_size % mask.shape[0] == 0:
654
+ raise ValueError(
655
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
656
+ f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
657
+ " of masks that you pass is divisible by the total requested batch size."
658
+ )
659
+ mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
660
+ if masked_image_latents.shape[0] < batch_size:
661
+ if not batch_size % masked_image_latents.shape[0] == 0:
662
+ raise ValueError(
663
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
664
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
665
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
666
+ )
667
+ masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
668
+
669
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
670
+ masked_image_latents = (
671
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
672
+ )
673
+
674
+ # aligning device to prevent device errors when concating it with the latent model input
675
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
676
+ return mask, masked_image_latents
677
+
678
+ @torch.no_grad()
679
+ def __call__(
680
+ self,
681
+ prompt: Union[str, List[str]] = None,
682
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
683
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
684
+ height: Optional[int] = None,
685
+ width: Optional[int] = None,
686
+ num_inference_steps: int = 50,
687
+ jump_length: Optional[int] = 10,
688
+ jump_n_sample: Optional[int] = 10,
689
+ guidance_scale: float = 7.5,
690
+ negative_prompt: Optional[Union[str, List[str]]] = None,
691
+ num_images_per_prompt: Optional[int] = 1,
692
+ eta: float = 0.0,
693
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
694
+ latents: Optional[torch.FloatTensor] = None,
695
+ prompt_embeds: Optional[torch.FloatTensor] = None,
696
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
697
+ output_type: Optional[str] = "pil",
698
+ return_dict: bool = True,
699
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
700
+ callback_steps: int = 1,
701
+ ):
702
+ r"""
703
+ Function invoked when calling the pipeline for generation.
704
+ Args:
705
+ prompt (`str` or `List[str]`, *optional*):
706
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
707
+ instead.
708
+ image (`PIL.Image.Image`):
709
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
710
+ be masked out with `mask_image` and repainted according to `prompt`.
711
+ mask_image (`PIL.Image.Image`):
712
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
713
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
714
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
715
+ instead of 3, so the expected shape would be `(B, 1, H, W)`.
716
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
717
+ The height in pixels of the generated image.
718
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
719
+ The width in pixels of the generated image.
720
+ num_inference_steps (`int`, *optional*, defaults to 50):
721
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
722
+ expense of slower inference.
723
+ jump_length (`int`, *optional*, defaults to 10):
724
+ The number of steps taken forward in time before going backward in time for a single jump ("j" in
725
+ RePaint paper). Take a look at Figure 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf.
726
+ jump_n_sample (`int`, *optional*, defaults to 10):
727
+ The number of times we will make forward time jump for a given chosen time sample. Take a look at
728
+ Figure 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf.
729
+ guidance_scale (`float`, *optional*, defaults to 7.5):
730
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
731
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
732
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
733
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
734
+ usually at the expense of lower image quality.
735
+ negative_prompt (`str` or `List[str]`, *optional*):
736
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
737
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
738
+ is less than `1`).
739
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
740
+ The number of images to generate per prompt.
741
+ eta (`float`, *optional*, defaults to 0.0):
742
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
743
+ [`schedulers.DDIMScheduler`], will be ignored for others.
744
+ generator (`torch.Generator`, *optional*):
745
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
746
+ to make generation deterministic.
747
+ latents (`torch.FloatTensor`, *optional*):
748
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
749
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
750
+ tensor will be generated by sampling using the supplied random `generator`.
751
+ prompt_embeds (`torch.FloatTensor`, *optional*):
752
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
753
+ provided, text embeddings will be generated from `prompt` input argument.
754
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
755
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
756
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
757
+ argument.
758
+ output_type (`str`, *optional*, defaults to `"pil"`):
759
+ The output format of the generate image. Choose between
760
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
761
+ return_dict (`bool`, *optional*, defaults to `True`):
762
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
763
+ plain tuple.
764
+ callback (`Callable`, *optional*):
765
+ A function that will be called every `callback_steps` steps during inference. The function will be
766
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
767
+ callback_steps (`int`, *optional*, defaults to 1):
768
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
769
+ called at every step.
770
+ Examples:
771
+ ```py
772
+ >>> import PIL
773
+ >>> import requests
774
+ >>> import torch
775
+ >>> from io import BytesIO
776
+ >>> from diffusers import DiffusionPipeline, RePaintScheduler
777
+ >>> def download_image(url):
778
+ ... response = requests.get(url)
779
+ ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
780
+ >>> base_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/"
781
+ >>> img_url = base_url + "overture-creations-5sI6fQgYIuo.png"
782
+ >>> mask_url = base_url + "overture-creations-5sI6fQgYIuo_mask.png"
783
+ >>> init_image = download_image(img_url).resize((512, 512))
784
+ >>> mask_image = download_image(mask_url).resize((512, 512))
785
+ >>> pipe = DiffusionPipeline.from_pretrained(
786
+ ... "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, custom_pipeline="stable_diffusion_repaint",
787
+ ... )
788
+ >>> pipe.scheduler = RePaintScheduler.from_config(pipe.scheduler.config)
789
+ >>> pipe = pipe.to("cuda")
790
+ >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
791
+ >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
792
+ ```
793
+ Returns:
794
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
795
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
796
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
797
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
798
+ (nsfw) content, according to the `safety_checker`.
799
+ """
800
+ # 0. Default height and width to unet
801
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
802
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
803
+
804
+ # 1. Check inputs
805
+ self.check_inputs(
806
+ prompt,
807
+ height,
808
+ width,
809
+ callback_steps,
810
+ negative_prompt,
811
+ prompt_embeds,
812
+ negative_prompt_embeds,
813
+ )
814
+
815
+ if image is None:
816
+ raise ValueError("`image` input cannot be undefined.")
817
+
818
+ if mask_image is None:
819
+ raise ValueError("`mask_image` input cannot be undefined.")
820
+
821
+ # 2. Define call parameters
822
+ if prompt is not None and isinstance(prompt, str):
823
+ batch_size = 1
824
+ elif prompt is not None and isinstance(prompt, list):
825
+ batch_size = len(prompt)
826
+ else:
827
+ batch_size = prompt_embeds.shape[0]
828
+
829
+ device = self._execution_device
830
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
831
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
832
+ # corresponds to doing no classifier free guidance.
833
+ do_classifier_free_guidance = guidance_scale > 1.0
834
+
835
+ # 3. Encode input prompt
836
+ prompt_embeds = self._encode_prompt(
837
+ prompt,
838
+ device,
839
+ num_images_per_prompt,
840
+ do_classifier_free_guidance,
841
+ negative_prompt,
842
+ prompt_embeds=prompt_embeds,
843
+ negative_prompt_embeds=negative_prompt_embeds,
844
+ )
845
+
846
+ # 4. Preprocess mask and image
847
+ mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
848
+
849
+ # 5. set timesteps
850
+ self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, device)
851
+ self.scheduler.eta = eta
852
+
853
+ timesteps = self.scheduler.timesteps
854
+ # latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
855
+
856
+ # 6. Prepare latent variables
857
+ num_channels_latents = self.vae.config.latent_channels
858
+ latents = self.prepare_latents(
859
+ batch_size * num_images_per_prompt,
860
+ num_channels_latents,
861
+ height,
862
+ width,
863
+ prompt_embeds.dtype,
864
+ device,
865
+ generator,
866
+ latents,
867
+ )
868
+
869
+ # 7. Prepare mask latent variables
870
+ mask, masked_image_latents = self.prepare_mask_latents(
871
+ mask,
872
+ masked_image,
873
+ batch_size * num_images_per_prompt,
874
+ height,
875
+ width,
876
+ prompt_embeds.dtype,
877
+ device,
878
+ generator,
879
+ do_classifier_free_guidance=False, # We do not need duplicate mask and image
880
+ )
881
+
882
+ # 8. Check that sizes of mask, masked image and latents match
883
+ # num_channels_mask = mask.shape[1]
884
+ # num_channels_masked_image = masked_image_latents.shape[1]
885
+ if num_channels_latents != self.unet.config.in_channels:
886
+ raise ValueError(
887
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
888
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} "
889
+ f" = Please verify the config of"
890
+ " `pipeline.unet` or your `mask_image` or `image` input."
891
+ )
892
+
893
+ # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
894
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
895
+
896
+ t_last = timesteps[0] + 1
897
+
898
+ # 10. Denoising loop
899
+ with self.progress_bar(total=len(timesteps)) as progress_bar:
900
+ for i, t in enumerate(timesteps):
901
+ if t >= t_last:
902
+ # compute the reverse: x_t-1 -> x_t
903
+ latents = self.scheduler.undo_step(latents, t_last, generator)
904
+ progress_bar.update()
905
+ t_last = t
906
+ continue
907
+
908
+ # expand the latents if we are doing classifier free guidance
909
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
910
+
911
+ # concat latents, mask, masked_image_latents in the channel dimension
912
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
913
+ # latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
914
+
915
+ # predict the noise residual
916
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
917
+
918
+ # perform guidance
919
+ if do_classifier_free_guidance:
920
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
921
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
922
+
923
+ # compute the previous noisy sample x_t -> x_t-1
924
+ latents = self.scheduler.step(
925
+ noise_pred,
926
+ t,
927
+ latents,
928
+ masked_image_latents,
929
+ mask,
930
+ **extra_step_kwargs,
931
+ ).prev_sample
932
+
933
+ # call the callback, if provided
934
+ progress_bar.update()
935
+ if callback is not None and i % callback_steps == 0:
936
+ step_idx = i // getattr(self.scheduler, "order", 1)
937
+ callback(step_idx, t, latents)
938
+
939
+ t_last = t
940
+
941
+ # 11. Post-processing
942
+ image = self.decode_latents(latents)
943
+
944
+ # 12. Run safety checker
945
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
946
+
947
+ # 13. Convert to PIL
948
+ if output_type == "pil":
949
+ image = self.numpy_to_pil(image)
950
+
951
+ # Offload last model to CPU
952
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
953
+ self.final_offload_hook.offload()
954
+
955
+ if not return_dict:
956
+ return (image, has_nsfw_concept)
957
+
958
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
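The denoising loop above follows the RePaint schedule: whenever the next timestep is not smaller than the previous one, the loop re-noises the latents with `undo_step` (a forward jump) instead of denoising, which is what `jump_length` and `jump_n_sample` control. A toy sketch of that branching, independent of the scheduler:

```py
# Illustrative only: the direction check used in the loop above,
# run on a toy timestep sequence containing one backward jump.
timesteps = [3, 2, 1, 2, 1, 0]
t_last = timesteps[0] + 1
for t in timesteps:
    if t >= t_last:
        print(f"t={t}: undo_step, re-noise x_{t_last} -> x_{t}")
    else:
        print(f"t={t}: denoise x_{t} -> x_{t - 1}")
    t_last = t
```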
v0.24.0/stable_diffusion_tensorrt_img2img.py ADDED
@@ -0,0 +1,1055 @@
1
+ #
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ # SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ import gc
19
+ import os
20
+ from collections import OrderedDict
21
+ from copy import copy
22
+ from typing import List, Optional, Union
23
+
24
+ import numpy as np
25
+ import onnx
26
+ import onnx_graphsurgeon as gs
27
+ import PIL.Image
28
+ import tensorrt as trt
29
+ import torch
30
+ from huggingface_hub import snapshot_download
31
+ from onnx import shape_inference
32
+ from polygraphy import cuda
33
+ from polygraphy.backend.common import bytes_from_path
34
+ from polygraphy.backend.onnx.loader import fold_constants
35
+ from polygraphy.backend.trt import (
36
+ CreateConfig,
37
+ Profile,
38
+ engine_from_bytes,
39
+ engine_from_network,
40
+ network_from_onnx_path,
41
+ save_engine,
42
+ )
43
+ from polygraphy.backend.trt import util as trt_util
44
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
45
+
46
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
47
+ from diffusers.pipelines.stable_diffusion import (
48
+ StableDiffusionImg2ImgPipeline,
49
+ StableDiffusionPipelineOutput,
50
+ StableDiffusionSafetyChecker,
51
+ )
52
+ from diffusers.schedulers import DDIMScheduler
53
+ from diffusers.utils import DIFFUSERS_CACHE, logging
54
+
55
+
56
+ """
57
+ Installation instructions
58
+ python3 -m pip install --upgrade transformers diffusers>=0.16.0
59
+ python3 -m pip install --upgrade tensorrt>=8.6.1
60
+ python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
61
+ python3 -m pip install onnxruntime
62
+ """
63
+
64
+ TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
65
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
66
+
67
+ # Map of numpy dtype -> torch dtype
68
+ numpy_to_torch_dtype_dict = {
69
+ np.uint8: torch.uint8,
70
+ np.int8: torch.int8,
71
+ np.int16: torch.int16,
72
+ np.int32: torch.int32,
73
+ np.int64: torch.int64,
74
+ np.float16: torch.float16,
75
+ np.float32: torch.float32,
76
+ np.float64: torch.float64,
77
+ np.complex64: torch.complex64,
78
+ np.complex128: torch.complex128,
79
+ }
80
+ if np.version.full_version >= "1.24.0":
81
+ numpy_to_torch_dtype_dict[np.bool_] = torch.bool
82
+ else:
83
+ numpy_to_torch_dtype_dict[np.bool] = torch.bool
84
+
85
+ # Map of torch dtype -> numpy dtype
86
+ torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}
87
+
88
+
89
+ def device_view(t):
90
+ return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])
91
+
92
+
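A minimal sketch of how the dtype tables and `device_view` helper above are used: an existing CUDA tensor is wrapped as a Polygraphy `DeviceView` without copying, so the TensorRT engine can read it in place (requires a CUDA device; reuses this module's imports):

```py
# Illustrative only: zero-copy wrapping of a CUDA tensor for TensorRT I/O.
if torch.cuda.is_available():
    sample = torch.randn(2, 4, 64, 64, dtype=torch.float16, device="cuda")
    view = device_view(sample)  # cuda.DeviceView over the same device memory
    # view.shape -> (2, 4, 64, 64), view.dtype -> np.float16
```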
93
+ def preprocess_image(image):
94
+ """
95
+ image: PIL.Image.Image, returned as a (1, 3, H, W) torch.Tensor in [-1, 1] with sides rounded down to a multiple of 32
96
+ """
97
+ w, h = image.size
98
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
99
+ image = image.resize((w, h))
100
+ image = np.array(image).astype(np.float32) / 255.0
101
+ image = image[None].transpose(0, 3, 1, 2)
102
+ image = torch.from_numpy(image).contiguous()
103
+ return 2.0 * image - 1.0
104
+
105
+
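For reference, what `preprocess_image` above does to an arbitrary-sized input: each side is rounded down to a multiple of 32 and pixel values are rescaled from [0, 255] to [-1, 1]:

```py
# Illustrative only: a 513x769 solid-red image becomes a (1, 3, 768, 512)
# float tensor with values in [-1, 1].
img = PIL.Image.new("RGB", (513, 769), color=(255, 0, 0))
out = preprocess_image(img)
# out.shape == torch.Size([1, 3, 768, 512])
# out.min() == -1.0 (green/blue channels), out.max() == 1.0 (red channel)
```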
106
+ class Engine:
107
+ def __init__(self, engine_path):
108
+ self.engine_path = engine_path
109
+ self.engine = None
110
+ self.context = None
111
+ self.buffers = OrderedDict()
112
+ self.tensors = OrderedDict()
113
+
114
+ def __del__(self):
115
+ [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]
116
+ del self.engine
117
+ del self.context
118
+ del self.buffers
119
+ del self.tensors
120
+
121
+ def build(
122
+ self,
123
+ onnx_path,
124
+ fp16,
125
+ input_profile=None,
126
+ enable_preview=False,
127
+ enable_all_tactics=False,
128
+ timing_cache=None,
129
+ workspace_size=0,
130
+ ):
131
+ logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
132
+ p = Profile()
133
+ if input_profile:
134
+ for name, dims in input_profile.items():
135
+ assert len(dims) == 3
136
+ p.add(name, min=dims[0], opt=dims[1], max=dims[2])
137
+
138
+ config_kwargs = {}
139
+
140
+ config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]
141
+ if enable_preview:
142
+ # Faster dynamic shapes made optional since it increases engine build time.
143
+ config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)
144
+ if workspace_size > 0:
145
+ config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
146
+ if not enable_all_tactics:
147
+ config_kwargs["tactic_sources"] = []
148
+
149
+ engine = engine_from_network(
150
+ network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
151
+ config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),
152
+ save_timing_cache=timing_cache,
153
+ )
154
+ save_engine(engine, path=self.engine_path)
155
+
156
+ def load(self):
157
+ logger.warning(f"Loading TensorRT engine: {self.engine_path}")
158
+ self.engine = engine_from_bytes(bytes_from_path(self.engine_path))
159
+
160
+ def activate(self):
161
+ self.context = self.engine.create_execution_context()
162
+
163
+ def allocate_buffers(self, shape_dict=None, device="cuda"):
164
+ for idx in range(trt_util.get_bindings_per_profile(self.engine)):
165
+ binding = self.engine[idx]
166
+ if shape_dict and binding in shape_dict:
167
+ shape = shape_dict[binding]
168
+ else:
169
+ shape = self.engine.get_binding_shape(binding)
170
+ dtype = trt.nptype(self.engine.get_binding_dtype(binding))
171
+ if self.engine.binding_is_input(binding):
172
+ self.context.set_binding_shape(idx, shape)
173
+ tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
174
+ self.tensors[binding] = tensor
175
+ self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)
176
+
177
+ def infer(self, feed_dict, stream):
178
+ start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
179
+ # shallow copy of ordered dict
180
+ device_buffers = copy(self.buffers)
181
+ for name, buf in feed_dict.items():
182
+ assert isinstance(buf, cuda.DeviceView)
183
+ device_buffers[name] = buf
184
+ bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]
185
+ noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)
186
+ if not noerror:
187
+ raise ValueError("ERROR: inference failed.")
188
+
189
+ return self.tensors
190
+
191
+
192
+ class Optimizer:
193
+ def __init__(self, onnx_graph):
194
+ self.graph = gs.import_onnx(onnx_graph)
195
+
196
+ def cleanup(self, return_onnx=False):
197
+ self.graph.cleanup().toposort()
198
+ if return_onnx:
199
+ return gs.export_onnx(self.graph)
200
+
201
+ def select_outputs(self, keep, names=None):
202
+ self.graph.outputs = [self.graph.outputs[o] for o in keep]
203
+ if names:
204
+ for i, name in enumerate(names):
205
+ self.graph.outputs[i].name = name
206
+
207
+ def fold_constants(self, return_onnx=False):
208
+ onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)
209
+ self.graph = gs.import_onnx(onnx_graph)
210
+ if return_onnx:
211
+ return onnx_graph
212
+
213
+ def infer_shapes(self, return_onnx=False):
214
+ onnx_graph = gs.export_onnx(self.graph)
215
+ if onnx_graph.ByteSize() > 2147483648:
216
+ raise TypeError("ERROR: model size exceeds supported 2GB limit")
217
+ else:
218
+ onnx_graph = shape_inference.infer_shapes(onnx_graph)
219
+
220
+ self.graph = gs.import_onnx(onnx_graph)
221
+ if return_onnx:
222
+ return onnx_graph
223
+
224
+
225
+ class BaseModel:
226
+ def __init__(self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77):
227
+ self.model = model
228
+ self.name = "SD Model"
229
+ self.fp16 = fp16
230
+ self.device = device
231
+
232
+ self.min_batch = 1
233
+ self.max_batch = max_batch_size
234
+ self.min_image_shape = 256 # min image resolution: 256x256
235
+ self.max_image_shape = 1024 # max image resolution: 1024x1024
236
+ self.min_latent_shape = self.min_image_shape // 8
237
+ self.max_latent_shape = self.max_image_shape // 8
238
+
239
+ self.embedding_dim = embedding_dim
240
+ self.text_maxlen = text_maxlen
241
+
242
+ def get_model(self):
243
+ return self.model
244
+
245
+ def get_input_names(self):
246
+ pass
247
+
248
+ def get_output_names(self):
249
+ pass
250
+
251
+ def get_dynamic_axes(self):
252
+ return None
253
+
254
+ def get_sample_input(self, batch_size, image_height, image_width):
255
+ pass
256
+
257
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
258
+ return None
259
+
260
+ def get_shape_dict(self, batch_size, image_height, image_width):
261
+ return None
262
+
263
+ def optimize(self, onnx_graph):
264
+ opt = Optimizer(onnx_graph)
265
+ opt.cleanup()
266
+ opt.fold_constants()
267
+ opt.infer_shapes()
268
+ onnx_opt_graph = opt.cleanup(return_onnx=True)
269
+ return onnx_opt_graph
270
+
271
+ def check_dims(self, batch_size, image_height, image_width):
272
+ assert batch_size >= self.min_batch and batch_size <= self.max_batch
273
+ assert image_height % 8 == 0 and image_width % 8 == 0  # both dimensions must be multiples of 8
274
+ latent_height = image_height // 8
275
+ latent_width = image_width // 8
276
+ assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape
277
+ assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape
278
+ return (latent_height, latent_width)
279
+
280
+ def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
281
+ min_batch = batch_size if static_batch else self.min_batch
282
+ max_batch = batch_size if static_batch else self.max_batch
283
+ latent_height = image_height // 8
284
+ latent_width = image_width // 8
285
+ min_image_height = image_height if static_shape else self.min_image_shape
286
+ max_image_height = image_height if static_shape else self.max_image_shape
287
+ min_image_width = image_width if static_shape else self.min_image_shape
288
+ max_image_width = image_width if static_shape else self.max_image_shape
289
+ min_latent_height = latent_height if static_shape else self.min_latent_shape
290
+ max_latent_height = latent_height if static_shape else self.max_latent_shape
291
+ min_latent_width = latent_width if static_shape else self.min_latent_shape
292
+ max_latent_width = latent_width if static_shape else self.max_latent_shape
293
+ return (
294
+ min_batch,
295
+ max_batch,
296
+ min_image_height,
297
+ max_image_height,
298
+ min_image_width,
299
+ max_image_width,
300
+ min_latent_height,
301
+ max_latent_height,
302
+ min_latent_width,
303
+ max_latent_width,
304
+ )
305
+
306
+
307
+ def getOnnxPath(model_name, onnx_dir, opt=True):
308
+ return os.path.join(onnx_dir, model_name + (".opt" if opt else "") + ".onnx")
309
+
310
+
311
+ def getEnginePath(model_name, engine_dir):
312
+ return os.path.join(engine_dir, model_name + ".plan")
313
+
314
+
315
+ def build_engines(
316
+ models: dict,
317
+ engine_dir,
318
+ onnx_dir,
319
+ onnx_opset,
320
+ opt_image_height,
321
+ opt_image_width,
322
+ opt_batch_size=1,
323
+ force_engine_rebuild=False,
324
+ static_batch=False,
325
+ static_shape=True,
326
+ enable_preview=False,
327
+ enable_all_tactics=False,
328
+ timing_cache=None,
329
+ max_workspace_size=0,
330
+ ):
331
+ built_engines = {}
332
+ if not os.path.isdir(onnx_dir):
333
+ os.makedirs(onnx_dir)
334
+ if not os.path.isdir(engine_dir):
335
+ os.makedirs(engine_dir)
336
+
337
+ # Export models to ONNX
338
+ for model_name, model_obj in models.items():
339
+ engine_path = getEnginePath(model_name, engine_dir)
340
+ if force_engine_rebuild or not os.path.exists(engine_path):
341
+ logger.warning("Building Engines...")
342
+ logger.warning("Engine build can take a while to complete")
343
+ onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
344
+ onnx_opt_path = getOnnxPath(model_name, onnx_dir)
345
+ if force_engine_rebuild or not os.path.exists(onnx_opt_path):
346
+ if force_engine_rebuild or not os.path.exists(onnx_path):
347
+ logger.warning(f"Exporting model: {onnx_path}")
348
+ model = model_obj.get_model()
349
+ with torch.inference_mode(), torch.autocast("cuda"):
350
+ inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)
351
+ torch.onnx.export(
352
+ model,
353
+ inputs,
354
+ onnx_path,
355
+ export_params=True,
356
+ opset_version=onnx_opset,
357
+ do_constant_folding=True,
358
+ input_names=model_obj.get_input_names(),
359
+ output_names=model_obj.get_output_names(),
360
+ dynamic_axes=model_obj.get_dynamic_axes(),
361
+ )
362
+ del model
363
+ torch.cuda.empty_cache()
364
+ gc.collect()
365
+ else:
366
+ logger.warning(f"Found cached model: {onnx_path}")
367
+
368
+ # Optimize onnx
369
+ if force_engine_rebuild or not os.path.exists(onnx_opt_path):
370
+ logger.warning(f"Generating optimized model: {onnx_opt_path}")
371
+ onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path))
372
+ onnx.save(onnx_opt_graph, onnx_opt_path)
373
+ else:
374
+ logger.warning(f"Found cached optimized model: {onnx_opt_path} ")
375
+
376
+ # Build TensorRT engines
377
+ for model_name, model_obj in models.items():
378
+ engine_path = getEnginePath(model_name, engine_dir)
379
+ engine = Engine(engine_path)
380
+ onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
381
+ onnx_opt_path = getOnnxPath(model_name, onnx_dir)
382
+
383
+ if force_engine_rebuild or not os.path.exists(engine.engine_path):
384
+ engine.build(
385
+ onnx_opt_path,
386
+ fp16=True,
387
+ input_profile=model_obj.get_input_profile(
388
+ opt_batch_size,
389
+ opt_image_height,
390
+ opt_image_width,
391
+ static_batch=static_batch,
392
+ static_shape=static_shape,
393
+ ),
394
+ enable_preview=enable_preview,
395
+ timing_cache=timing_cache,
396
+ workspace_size=max_workspace_size,
397
+ )
398
+ built_engines[model_name] = engine
399
+
400
+ # Load and activate TensorRT engines
401
+ for model_name, model_obj in models.items():
402
+ engine = built_engines[model_name]
403
+ engine.load()
404
+ engine.activate()
405
+
406
+ return built_engines
407
+
408
+
409
+ def runEngine(engine, feed_dict, stream):
410
+ return engine.infer(feed_dict, stream)
411
+
412
+
413
+ class CLIP(BaseModel):
414
+ def __init__(self, model, device, max_batch_size, embedding_dim):
415
+ super(CLIP, self).__init__(
416
+ model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
417
+ )
418
+ self.name = "CLIP"
419
+
420
+ def get_input_names(self):
421
+ return ["input_ids"]
422
+
423
+ def get_output_names(self):
424
+ return ["text_embeddings", "pooler_output"]
425
+
426
+ def get_dynamic_axes(self):
427
+ return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}}
428
+
429
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
430
+ self.check_dims(batch_size, image_height, image_width)
431
+ min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(
432
+ batch_size, image_height, image_width, static_batch, static_shape
433
+ )
434
+ return {
435
+ "input_ids": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)]
436
+ }
437
+
438
+ def get_shape_dict(self, batch_size, image_height, image_width):
439
+ self.check_dims(batch_size, image_height, image_width)
440
+ return {
441
+ "input_ids": (batch_size, self.text_maxlen),
442
+ "text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim),
443
+ }
444
+
445
+ def get_sample_input(self, batch_size, image_height, image_width):
446
+ self.check_dims(batch_size, image_height, image_width)
447
+ return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)
448
+
449
+ def optimize(self, onnx_graph):
450
+ opt = Optimizer(onnx_graph)
451
+ opt.select_outputs([0]) # delete graph output#1
452
+ opt.cleanup()
453
+ opt.fold_constants()
454
+ opt.infer_shapes()
455
+ opt.select_outputs([0], names=["text_embeddings"]) # rename network output
456
+ opt_onnx_graph = opt.cleanup(return_onnx=True)
457
+ return opt_onnx_graph
458
+
459
+
460
+ def make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False):
461
+ return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
462
+
463
+
464
+ class UNet(BaseModel):
465
+ def __init__(
466
+ self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4
467
+ ):
468
+ super(UNet, self).__init__(
469
+ model=model,
470
+ fp16=fp16,
471
+ device=device,
472
+ max_batch_size=max_batch_size,
473
+ embedding_dim=embedding_dim,
474
+ text_maxlen=text_maxlen,
475
+ )
476
+ self.unet_dim = unet_dim
477
+ self.name = "UNet"
478
+
479
+ def get_input_names(self):
480
+ return ["sample", "timestep", "encoder_hidden_states"]
481
+
482
+ def get_output_names(self):
483
+ return ["latent"]
484
+
485
+ def get_dynamic_axes(self):
486
+ return {
487
+ "sample": {0: "2B", 2: "H", 3: "W"},
488
+ "encoder_hidden_states": {0: "2B"},
489
+ "latent": {0: "2B", 2: "H", 3: "W"},
490
+ }
491
+
492
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
493
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
494
+ (
495
+ min_batch,
496
+ max_batch,
497
+ _,
498
+ _,
499
+ _,
500
+ _,
501
+ min_latent_height,
502
+ max_latent_height,
503
+ min_latent_width,
504
+ max_latent_width,
505
+ ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
506
+ return {
507
+ "sample": [
508
+ (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width),
509
+ (2 * batch_size, self.unet_dim, latent_height, latent_width),
510
+ (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width),
511
+ ],
512
+ "encoder_hidden_states": [
513
+ (2 * min_batch, self.text_maxlen, self.embedding_dim),
514
+ (2 * batch_size, self.text_maxlen, self.embedding_dim),
515
+ (2 * max_batch, self.text_maxlen, self.embedding_dim),
516
+ ],
517
+ }
518
+
519
+ def get_shape_dict(self, batch_size, image_height, image_width):
520
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
521
+ return {
522
+ "sample": (2 * batch_size, self.unet_dim, latent_height, latent_width),
523
+ "encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim),
524
+ "latent": (2 * batch_size, 4, latent_height, latent_width),
525
+ }
526
+
527
+ def get_sample_input(self, batch_size, image_height, image_width):
528
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
529
+ dtype = torch.float16 if self.fp16 else torch.float32
530
+ return (
531
+ torch.randn(
532
+ 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device
533
+ ),
534
+ torch.tensor([1.0], dtype=torch.float32, device=self.device),
535
+ torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),
536
+ )
537
+
538
+
539
+ def make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False):
540
+ return UNet(
541
+ model,
542
+ fp16=True,
543
+ device=device,
544
+ max_batch_size=max_batch_size,
545
+ embedding_dim=embedding_dim,
546
+ unet_dim=(9 if inpaint else 4),
547
+ )
548
+
549
+
550
+ class VAE(BaseModel):
551
+ def __init__(self, model, device, max_batch_size, embedding_dim):
552
+ super(VAE, self).__init__(
553
+ model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
554
+ )
555
+ self.name = "VAE decoder"
556
+
557
+ def get_input_names(self):
558
+ return ["latent"]
559
+
560
+ def get_output_names(self):
561
+ return ["images"]
562
+
563
+ def get_dynamic_axes(self):
564
+ return {"latent": {0: "B", 2: "H", 3: "W"}, "images": {0: "B", 2: "8H", 3: "8W"}}
565
+
566
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
567
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
568
+ (
569
+ min_batch,
570
+ max_batch,
571
+ _,
572
+ _,
573
+ _,
574
+ _,
575
+ min_latent_height,
576
+ max_latent_height,
577
+ min_latent_width,
578
+ max_latent_width,
579
+ ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
580
+ return {
581
+ "latent": [
582
+ (min_batch, 4, min_latent_height, min_latent_width),
583
+ (batch_size, 4, latent_height, latent_width),
584
+ (max_batch, 4, max_latent_height, max_latent_width),
585
+ ]
586
+ }
587
+
588
+ def get_shape_dict(self, batch_size, image_height, image_width):
589
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
590
+ return {
591
+ "latent": (batch_size, 4, latent_height, latent_width),
592
+ "images": (batch_size, 3, image_height, image_width),
593
+ }
594
+
595
+ def get_sample_input(self, batch_size, image_height, image_width):
596
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
597
+ return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device)
598
+
599
+
600
+ def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):
601
+ return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
602
+
603
+
604
+ class TorchVAEEncoder(torch.nn.Module):
605
+ def __init__(self, model):
606
+ super().__init__()
607
+ self.vae_encoder = model
608
+
609
+ def forward(self, x):
610
+ return self.vae_encoder.encode(x).latent_dist.sample()
611
+
612
+
613
+ class VAEEncoder(BaseModel):
614
+ def __init__(self, model, device, max_batch_size, embedding_dim):
615
+ super(VAEEncoder, self).__init__(
616
+ model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
617
+ )
618
+ self.name = "VAE encoder"
619
+
620
+ def get_model(self):
621
+ vae_encoder = TorchVAEEncoder(self.model)
622
+ return vae_encoder
623
+
624
+ def get_input_names(self):
625
+ return ["images"]
626
+
627
+ def get_output_names(self):
628
+ return ["latent"]
629
+
630
+ def get_dynamic_axes(self):
631
+ return {"images": {0: "B", 2: "8H", 3: "8W"}, "latent": {0: "B", 2: "H", 3: "W"}}
632
+
633
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
634
+ assert batch_size >= self.min_batch and batch_size <= self.max_batch
635
+ min_batch = batch_size if static_batch else self.min_batch
636
+ max_batch = batch_size if static_batch else self.max_batch
637
+ self.check_dims(batch_size, image_height, image_width)
638
+ (
639
+ min_batch,
640
+ max_batch,
641
+ min_image_height,
642
+ max_image_height,
643
+ min_image_width,
644
+ max_image_width,
645
+ _,
646
+ _,
647
+ _,
648
+ _,
649
+ ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
650
+
651
+ return {
652
+ "images": [
653
+ (min_batch, 3, min_image_height, min_image_width),
654
+ (batch_size, 3, image_height, image_width),
655
+ (max_batch, 3, max_image_height, max_image_width),
656
+ ]
657
+ }
658
+
659
+ def get_shape_dict(self, batch_size, image_height, image_width):
660
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
661
+ return {
662
+ "images": (batch_size, 3, image_height, image_width),
663
+ "latent": (batch_size, 4, latent_height, latent_width),
664
+ }
665
+
666
+ def get_sample_input(self, batch_size, image_height, image_width):
667
+ self.check_dims(batch_size, image_height, image_width)
668
+ return torch.randn(batch_size, 3, image_height, image_width, dtype=torch.float32, device=self.device)
669
+
670
+
671
+ def make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False):
672
+ return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
673
+
674
+
675
+ class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
676
+ r"""
677
+ Pipeline for image-to-image generation using TensorRT accelerated Stable Diffusion.
678
+
679
+ This model inherits from [`StableDiffusionImg2ImgPipeline`]. Check the superclass documentation for the generic methods the
680
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
681
+
682
+ Args:
683
+ vae ([`AutoencoderKL`]):
684
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
685
+ text_encoder ([`CLIPTextModel`]):
686
+ Frozen text-encoder. Stable Diffusion uses the text portion of
687
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
688
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
689
+ tokenizer (`CLIPTokenizer`):
690
+ Tokenizer of class
691
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
692
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
693
+ scheduler ([`SchedulerMixin`]):
694
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
695
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
696
+ safety_checker ([`StableDiffusionSafetyChecker`]):
697
+ Classification module that estimates whether generated images could be considered offensive or harmful.
698
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
699
+ feature_extractor ([`CLIPFeatureExtractor`]):
700
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
701
+ """
702
+
703
+ def __init__(
704
+ self,
705
+ vae: AutoencoderKL,
706
+ text_encoder: CLIPTextModel,
707
+ tokenizer: CLIPTokenizer,
708
+ unet: UNet2DConditionModel,
709
+ scheduler: DDIMScheduler,
710
+ safety_checker: StableDiffusionSafetyChecker,
711
+ feature_extractor: CLIPFeatureExtractor,
712
+ requires_safety_checker: bool = True,
713
+ stages=["clip", "unet", "vae", "vae_encoder"],
714
+ image_height: int = 512,
715
+ image_width: int = 512,
716
+ max_batch_size: int = 16,
717
+ # ONNX export parameters
718
+ onnx_opset: int = 17,
719
+ onnx_dir: str = "onnx",
720
+ # TensorRT engine build parameters
721
+ engine_dir: str = "engine",
722
+ build_preview_features: bool = True,
723
+ force_engine_rebuild: bool = False,
724
+ timing_cache: str = "timing_cache",
725
+ ):
726
+ super().__init__(
727
+ vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker
728
+ )
729
+
730
+ self.vae.forward = self.vae.decode
731
+
732
+ self.stages = stages
733
+ self.image_height, self.image_width = image_height, image_width
734
+ self.inpaint = False
735
+ self.onnx_opset = onnx_opset
736
+ self.onnx_dir = onnx_dir
737
+ self.engine_dir = engine_dir
738
+ self.force_engine_rebuild = force_engine_rebuild
739
+ self.timing_cache = timing_cache
740
+ self.build_static_batch = False
741
+ self.build_dynamic_shape = False
742
+ self.build_preview_features = build_preview_features
743
+
744
+ self.max_batch_size = max_batch_size
745
+ # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.
746
+ if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512:
747
+ self.max_batch_size = 4
748
+
749
+ self.stream = None # loaded in loadResources()
750
+ self.models = {} # loaded in __loadModels()
751
+ self.engine = {} # loaded in build_engines()
752
+
753
+ def __loadModels(self):
754
+ # Load pipeline models
755
+ self.embedding_dim = self.text_encoder.config.hidden_size
756
+ models_args = {
757
+ "device": self.torch_device,
758
+ "max_batch_size": self.max_batch_size,
759
+ "embedding_dim": self.embedding_dim,
760
+ "inpaint": self.inpaint,
761
+ }
762
+ if "clip" in self.stages:
763
+ self.models["clip"] = make_CLIP(self.text_encoder, **models_args)
764
+ if "unet" in self.stages:
765
+ self.models["unet"] = make_UNet(self.unet, **models_args)
766
+ if "vae" in self.stages:
767
+ self.models["vae"] = make_VAE(self.vae, **models_args)
768
+ if "vae_encoder" in self.stages:
769
+ self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args)
770
+
771
+ @classmethod
772
+ def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
773
+ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
774
+ resume_download = kwargs.pop("resume_download", False)
775
+ proxies = kwargs.pop("proxies", None)
776
+ local_files_only = kwargs.pop("local_files_only", False)
777
+ use_auth_token = kwargs.pop("use_auth_token", None)
778
+ revision = kwargs.pop("revision", None)
779
+
780
+ cls.cached_folder = (
781
+ pretrained_model_name_or_path
782
+ if os.path.isdir(pretrained_model_name_or_path)
783
+ else snapshot_download(
784
+ pretrained_model_name_or_path,
785
+ cache_dir=cache_dir,
786
+ resume_download=resume_download,
787
+ proxies=proxies,
788
+ local_files_only=local_files_only,
789
+ use_auth_token=use_auth_token,
790
+ revision=revision,
791
+ )
792
+ )
793
+
794
+ def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False):
795
+ super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings)
796
+
797
+ self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir)
798
+ self.engine_dir = os.path.join(self.cached_folder, self.engine_dir)
799
+ self.timing_cache = os.path.join(self.cached_folder, self.timing_cache)
800
+
801
+ # set device
802
+ self.torch_device = self._execution_device
803
+ logger.warning(f"Running inference on device: {self.torch_device}")
804
+
805
+ # load models
806
+ self.__loadModels()
807
+
808
+ # build engines
809
+ self.engine = build_engines(
810
+ self.models,
811
+ self.engine_dir,
812
+ self.onnx_dir,
813
+ self.onnx_opset,
814
+ opt_image_height=self.image_height,
815
+ opt_image_width=self.image_width,
816
+ force_engine_rebuild=self.force_engine_rebuild,
817
+ static_batch=self.build_static_batch,
818
+ static_shape=not self.build_dynamic_shape,
819
+ enable_preview=self.build_preview_features,
820
+ timing_cache=self.timing_cache,
821
+ )
822
+
823
+ return self
824
+
825
+ def __initialize_timesteps(self, timesteps, strength):
826
+ self.scheduler.set_timesteps(timesteps)
827
+ offset = self.scheduler.steps_offset if hasattr(self.scheduler, "steps_offset") else 0
828
+ init_timestep = int(timesteps * strength) + offset
829
+ init_timestep = min(init_timestep, timesteps)
830
+ t_start = max(timesteps - init_timestep + offset, 0)
831
+ timesteps = self.scheduler.timesteps[t_start:].to(self.torch_device)
832
+ return timesteps, t_start
833
+
834
+ def __preprocess_images(self, batch_size, images=()):
835
+ init_images = []
836
+ for image in images:
837
+ image = image.to(self.torch_device).float()
838
+ image = image.repeat(batch_size, 1, 1, 1)
839
+ init_images.append(image)
840
+ return tuple(init_images)
841
+
842
+ def __encode_image(self, init_image):
843
+ init_latents = runEngine(self.engine["vae_encoder"], {"images": device_view(init_image)}, self.stream)[
844
+ "latent"
845
+ ]
846
+ init_latents = 0.18215 * init_latents
847
+ return init_latents
848
+
849
+ def __encode_prompt(self, prompt, negative_prompt):
850
+ r"""
851
+ Encodes the prompt into text encoder hidden states.
852
+
853
+ Args:
854
+ prompt (`str` or `List[str]`, *optional*):
855
+ prompt to be encoded
856
+ negative_prompt (`str` or `List[str]`, *optional*):
857
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
858
+ `negative_prompt_embeds` instead.
859
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
860
+ """
861
+ # Tokenize prompt
862
+ text_input_ids = (
863
+ self.tokenizer(
864
+ prompt,
865
+ padding="max_length",
866
+ max_length=self.tokenizer.model_max_length,
867
+ truncation=True,
868
+ return_tensors="pt",
869
+ )
870
+ .input_ids.type(torch.int32)
871
+ .to(self.torch_device)
872
+ )
873
+
874
+ text_input_ids_inp = device_view(text_input_ids)
875
+ # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt
876
+ text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids_inp}, self.stream)[
877
+ "text_embeddings"
878
+ ].clone()
879
+
880
+ # Tokenize negative prompt
881
+ uncond_input_ids = (
882
+ self.tokenizer(
883
+ negative_prompt,
884
+ padding="max_length",
885
+ max_length=self.tokenizer.model_max_length,
886
+ truncation=True,
887
+ return_tensors="pt",
888
+ )
889
+ .input_ids.type(torch.int32)
890
+ .to(self.torch_device)
891
+ )
892
+ uncond_input_ids_inp = device_view(uncond_input_ids)
893
+ uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[
894
+ "text_embeddings"
895
+ ]
896
+
897
+ # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance
898
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16)
899
+
900
+ return text_embeddings
901
+
902
+ def __denoise_latent(
903
+ self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None
904
+ ):
905
+ if not isinstance(timesteps, torch.Tensor):
906
+ timesteps = self.scheduler.timesteps
907
+ for step_index, timestep in enumerate(timesteps):
908
+ # Expand the latents if we are doing classifier free guidance
909
+ latent_model_input = torch.cat([latents] * 2)
910
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)
911
+ if isinstance(mask, torch.Tensor):
912
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
913
+
914
+ # Predict the noise residual
915
+ timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep
916
+
917
+ sample_inp = device_view(latent_model_input)
918
+ timestep_inp = device_view(timestep_float)
919
+ embeddings_inp = device_view(text_embeddings)
920
+ noise_pred = runEngine(
921
+ self.engine["unet"],
922
+ {"sample": sample_inp, "timestep": timestep_inp, "encoder_hidden_states": embeddings_inp},
923
+ self.stream,
924
+ )["latent"]
925
+
926
+ # Perform guidance
927
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
928
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
929
+
930
+ latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample
931
+
932
+ latents = 1.0 / 0.18215 * latents
933
+ return latents
934
+
935
+ def __decode_latent(self, latents):
936
+ images = runEngine(self.engine["vae"], {"latent": device_view(latents)}, self.stream)["images"]
937
+ images = (images / 2 + 0.5).clamp(0, 1)
938
+ return images.cpu().permute(0, 2, 3, 1).float().numpy()
939
+
940
+ def __loadResources(self, image_height, image_width, batch_size):
941
+ self.stream = cuda.Stream()
942
+
943
+ # Allocate buffers for TensorRT engine bindings
944
+ for model_name, obj in self.models.items():
945
+ self.engine[model_name].allocate_buffers(
946
+ shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device
947
+ )
948
+
949
+ @torch.no_grad()
950
+ def __call__(
951
+ self,
952
+ prompt: Union[str, List[str]] = None,
953
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
954
+ strength: float = 0.8,
955
+ num_inference_steps: int = 50,
956
+ guidance_scale: float = 7.5,
957
+ negative_prompt: Optional[Union[str, List[str]]] = None,
958
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
959
+ ):
960
+ r"""
961
+ Function invoked when calling the pipeline for generation.
962
+
963
+ Args:
964
+ prompt (`str` or `List[str]`, *optional*):
965
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
966
+ instead.
967
+ image (`PIL.Image.Image`):
968
+ `Image`, or tensor representing an image batch, to be used as the starting point for the
969
+ image-to-image generation process; it is noised according to `strength` before denoising.
970
+ strength (`float`, *optional*, defaults to 0.8):
971
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
972
+ will be used as a starting point, adding more noise to it the larger the `strength`. The number of
973
+ denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
974
+ be maximum and the denoising process will run for the full number of iterations specified in
975
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
976
+ num_inference_steps (`int`, *optional*, defaults to 50):
977
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
978
+ expense of slower inference.
979
+ guidance_scale (`float`, *optional*, defaults to 7.5):
980
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
981
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
982
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
983
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
984
+ usually at the expense of lower image quality.
985
+ negative_prompt (`str` or `List[str]`, *optional*):
986
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
987
+ `negative_prompt_embeds` instead.
988
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
989
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
990
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
991
+ to make generation deterministic.
992
+
993
+ """
994
+ self.generator = generator
995
+ self.denoising_steps = num_inference_steps
996
+ self.guidance_scale = guidance_scale
997
+
998
+ # Pre-compute latent input scales and linear multistep coefficients
999
+ self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device)
1000
+
1001
+ # Define call parameters
1002
+ if prompt is not None and isinstance(prompt, str):
1003
+ batch_size = 1
1004
+ prompt = [prompt]
1005
+ elif prompt is not None and isinstance(prompt, list):
1006
+ batch_size = len(prompt)
1007
+ else:
1008
+ raise ValueError(f"Expected prompt to be of type list or str but got {type(prompt)}")
1009
+
1010
+ if negative_prompt is None:
1011
+ negative_prompt = [""] * batch_size
1012
+
1013
+ if negative_prompt is not None and isinstance(negative_prompt, str):
1014
+ negative_prompt = [negative_prompt]
1015
+
1016
+ assert len(prompt) == len(negative_prompt)
1017
+
1018
+ if batch_size > self.max_batch_size:
1019
+ raise ValueError(
1020
+ f"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. If dynamic shape is used, then maximum batch size is 4"
1021
+ )
1022
+
1023
+ # load resources
1024
+ self.__loadResources(self.image_height, self.image_width, batch_size)
1025
+
1026
+ with torch.inference_mode(), torch.autocast("cuda"), trt.Runtime(TRT_LOGGER):
1027
+ # Initialize timesteps
1028
+ timesteps, t_start = self.__initialize_timesteps(self.denoising_steps, strength)
1029
+ latent_timestep = timesteps[:1].repeat(batch_size)
1030
+
1031
+ # Pre-process input image
1032
+ if isinstance(image, PIL.Image.Image):
1033
+ image = preprocess_image(image)
1034
+ init_image = self.__preprocess_images(batch_size, (image,))[0]
1035
+
1036
+ # VAE encode init image
1037
+ init_latents = self.__encode_image(init_image)
1038
+
1039
+ # Add noise to latents using timesteps
1040
+ noise = torch.randn(
1041
+ init_latents.shape, generator=self.generator, device=self.torch_device, dtype=torch.float32
1042
+ )
1043
+ latents = self.scheduler.add_noise(init_latents, noise, latent_timestep)
1044
+
1045
+ # CLIP text encoder
1046
+ text_embeddings = self.__encode_prompt(prompt, negative_prompt)
1047
+
1048
+ # UNet denoiser
1049
+ latents = self.__denoise_latent(latents, text_embeddings, timesteps=timesteps, step_offset=t_start)
1050
+
1051
+ # VAE decode latent
1052
+ images = self.__decode_latent(latents)
1053
+
1054
+ images = self.numpy_to_pil(images)
1055
+ return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=None)
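A minimal usage sketch for the image-to-image pipeline above, assuming it is exposed as the community pipeline `stable_diffusion_tensorrt_img2img` so that `from_pretrained(..., custom_pipeline=...)` resolves to this file; the checkpoint name, input image path, prompt, and output path are illustrative placeholders, and a TensorRT-capable NVIDIA GPU plus the packages from the installation instructions are assumed to be available.

import torch
import PIL.Image
from diffusers import DDIMScheduler, StableDiffusionImg2ImgPipeline

# Checkpoint, file names, and prompt below are placeholders for illustration.
scheduler = DDIMScheduler.from_pretrained("stabilityai/stable-diffusion-2-1", subfolder="scheduler")
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1",
    custom_pipeline="stable_diffusion_tensorrt_img2img",  # assumed to resolve to the pipeline above
    variant="fp16",
    torch_dtype=torch.float16,
    scheduler=scheduler,
)
# Point the pipeline at the downloaded snapshot so ONNX exports and engine plans are cached there,
# then build/load the TensorRT engines when moving to the GPU.
pipe.set_cached_folder("stabilityai/stable-diffusion-2-1")
pipe = pipe.to("cuda")

init_image = PIL.Image.open("input.png").convert("RGB").resize((512, 512))
image = pipe("a fantasy landscape, trending on artstation", image=init_image, strength=0.75).images[0]
image.save("output.png")

With the default `num_inference_steps=50` and `strength=0.75`, roughly 38 denoising steps run on the noised init latents. The first call to `.to("cuda")` can take several minutes while the ONNX export and TensorRT engine build complete; later runs reuse the cached `.plan` files in the engine directory.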
v0.24.0/stable_diffusion_tensorrt_inpaint.py ADDED
@@ -0,0 +1,1107 @@
1
+ #
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ # SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ import gc
19
+ import os
20
+ from collections import OrderedDict
21
+ from copy import copy
22
+ from typing import List, Optional, Union
23
+
24
+ import numpy as np
25
+ import onnx
26
+ import onnx_graphsurgeon as gs
27
+ import PIL.Image
28
+ import tensorrt as trt
29
+ import torch
30
+ from huggingface_hub import snapshot_download
31
+ from onnx import shape_inference
32
+ from polygraphy import cuda
33
+ from polygraphy.backend.common import bytes_from_path
34
+ from polygraphy.backend.onnx.loader import fold_constants
35
+ from polygraphy.backend.trt import (
36
+ CreateConfig,
37
+ Profile,
38
+ engine_from_bytes,
39
+ engine_from_network,
40
+ network_from_onnx_path,
41
+ save_engine,
42
+ )
43
+ from polygraphy.backend.trt import util as trt_util
44
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
45
+
46
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
47
+ from diffusers.pipelines.stable_diffusion import (
48
+ StableDiffusionInpaintPipeline,
49
+ StableDiffusionPipelineOutput,
50
+ StableDiffusionSafetyChecker,
51
+ )
52
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image
53
+ from diffusers.schedulers import DDIMScheduler
54
+ from diffusers.utils import DIFFUSERS_CACHE, logging
55
+
56
+
57
+ """
58
+ Installation instructions
59
+ python3 -m pip install --upgrade transformers diffusers>=0.16.0
60
+ python3 -m pip install --upgrade tensorrt>=8.6.1
61
+ python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
62
+ python3 -m pip install onnxruntime
63
+ """
64
+
65
+ TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
66
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
67
+
68
+ # Map of numpy dtype -> torch dtype
69
+ numpy_to_torch_dtype_dict = {
70
+ np.uint8: torch.uint8,
71
+ np.int8: torch.int8,
72
+ np.int16: torch.int16,
73
+ np.int32: torch.int32,
74
+ np.int64: torch.int64,
75
+ np.float16: torch.float16,
76
+ np.float32: torch.float32,
77
+ np.float64: torch.float64,
78
+ np.complex64: torch.complex64,
79
+ np.complex128: torch.complex128,
80
+ }
81
+ if np.version.full_version >= "1.24.0":
82
+ numpy_to_torch_dtype_dict[np.bool_] = torch.bool
83
+ else:
84
+ numpy_to_torch_dtype_dict[np.bool] = torch.bool
85
+
86
+ # Map of torch dtype -> numpy dtype
87
+ torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}
88
+
89
+
90
+ def device_view(t):
91
+ return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])
92
+
93
+
94
+ def preprocess_image(image):
95
+ """
96
+ image: PIL.Image.Image, returned as a torch.Tensor scaled to [-1, 1]
97
+ """
98
+ w, h = image.size
99
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
100
+ image = image.resize((w, h))
101
+ image = np.array(image).astype(np.float32) / 255.0
102
+ image = image[None].transpose(0, 3, 1, 2)
103
+ image = torch.from_numpy(image).contiguous()
104
+ return 2.0 * image - 1.0
105
+
106
+
107
+ class Engine:
108
+ def __init__(self, engine_path):
109
+ self.engine_path = engine_path
110
+ self.engine = None
111
+ self.context = None
112
+ self.buffers = OrderedDict()
113
+ self.tensors = OrderedDict()
114
+
115
+ def __del__(self):
116
+ [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]
117
+ del self.engine
118
+ del self.context
119
+ del self.buffers
120
+ del self.tensors
121
+
122
+ def build(
123
+ self,
124
+ onnx_path,
125
+ fp16,
126
+ input_profile=None,
127
+ enable_preview=False,
128
+ enable_all_tactics=False,
129
+ timing_cache=None,
130
+ workspace_size=0,
131
+ ):
132
+ logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
133
+ p = Profile()
134
+ if input_profile:
135
+ for name, dims in input_profile.items():
136
+ assert len(dims) == 3
137
+ p.add(name, min=dims[0], opt=dims[1], max=dims[2])
138
+
139
+ config_kwargs = {}
140
+
141
+ config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]
142
+ if enable_preview:
143
+ # Faster dynamic shapes made optional since it increases engine build time.
144
+ config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)
145
+ if workspace_size > 0:
146
+ config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
147
+ if not enable_all_tactics:
148
+ config_kwargs["tactic_sources"] = []
149
+
150
+ engine = engine_from_network(
151
+ network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
152
+ config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),
153
+ save_timing_cache=timing_cache,
154
+ )
155
+ save_engine(engine, path=self.engine_path)
156
+
157
+ def load(self):
158
+ logger.warning(f"Loading TensorRT engine: {self.engine_path}")
159
+ self.engine = engine_from_bytes(bytes_from_path(self.engine_path))
160
+
161
+ def activate(self):
162
+ self.context = self.engine.create_execution_context()
163
+
164
+ def allocate_buffers(self, shape_dict=None, device="cuda"):
165
+ for idx in range(trt_util.get_bindings_per_profile(self.engine)):
166
+ binding = self.engine[idx]
167
+ if shape_dict and binding in shape_dict:
168
+ shape = shape_dict[binding]
169
+ else:
170
+ shape = self.engine.get_binding_shape(binding)
171
+ dtype = trt.nptype(self.engine.get_binding_dtype(binding))
172
+ if self.engine.binding_is_input(binding):
173
+ self.context.set_binding_shape(idx, shape)
174
+ tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
175
+ self.tensors[binding] = tensor
176
+ self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)
177
+
178
+ def infer(self, feed_dict, stream):
179
+ start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
180
+ # shallow copy of ordered dict
181
+ device_buffers = copy(self.buffers)
182
+ for name, buf in feed_dict.items():
183
+ assert isinstance(buf, cuda.DeviceView)
184
+ device_buffers[name] = buf
185
+ bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]
186
+ noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)
187
+ if not noerror:
188
+ raise ValueError("ERROR: inference failed.")
189
+
190
+ return self.tensors
191
+
192
+
193
+ class Optimizer:
194
+ def __init__(self, onnx_graph):
195
+ self.graph = gs.import_onnx(onnx_graph)
196
+
197
+ def cleanup(self, return_onnx=False):
198
+ self.graph.cleanup().toposort()
199
+ if return_onnx:
200
+ return gs.export_onnx(self.graph)
201
+
202
+ def select_outputs(self, keep, names=None):
203
+ self.graph.outputs = [self.graph.outputs[o] for o in keep]
204
+ if names:
205
+ for i, name in enumerate(names):
206
+ self.graph.outputs[i].name = name
207
+
208
+ def fold_constants(self, return_onnx=False):
209
+ onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)
210
+ self.graph = gs.import_onnx(onnx_graph)
211
+ if return_onnx:
212
+ return onnx_graph
213
+
214
+ def infer_shapes(self, return_onnx=False):
215
+ onnx_graph = gs.export_onnx(self.graph)
216
+ if onnx_graph.ByteSize() > 2147483648:
217
+ raise TypeError("ERROR: model size exceeds supported 2GB limit")
218
+ else:
219
+ onnx_graph = shape_inference.infer_shapes(onnx_graph)
220
+
221
+ self.graph = gs.import_onnx(onnx_graph)
222
+ if return_onnx:
223
+ return onnx_graph
224
+
225
+
226
+ class BaseModel:
227
+ def __init__(self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77):
228
+ self.model = model
229
+ self.name = "SD Model"
230
+ self.fp16 = fp16
231
+ self.device = device
232
+
233
+ self.min_batch = 1
234
+ self.max_batch = max_batch_size
235
+ self.min_image_shape = 256 # min image resolution: 256x256
236
+ self.max_image_shape = 1024 # max image resolution: 1024x1024
237
+ self.min_latent_shape = self.min_image_shape // 8
238
+ self.max_latent_shape = self.max_image_shape // 8
239
+
240
+ self.embedding_dim = embedding_dim
241
+ self.text_maxlen = text_maxlen
242
+
243
+ def get_model(self):
244
+ return self.model
245
+
246
+ def get_input_names(self):
247
+ pass
248
+
249
+ def get_output_names(self):
250
+ pass
251
+
252
+ def get_dynamic_axes(self):
253
+ return None
254
+
255
+ def get_sample_input(self, batch_size, image_height, image_width):
256
+ pass
257
+
258
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
259
+ return None
260
+
261
+ def get_shape_dict(self, batch_size, image_height, image_width):
262
+ return None
263
+
264
+ def optimize(self, onnx_graph):
265
+ opt = Optimizer(onnx_graph)
266
+ opt.cleanup()
267
+ opt.fold_constants()
268
+ opt.infer_shapes()
269
+ onnx_opt_graph = opt.cleanup(return_onnx=True)
270
+ return onnx_opt_graph
271
+
272
+ def check_dims(self, batch_size, image_height, image_width):
273
+ assert batch_size >= self.min_batch and batch_size <= self.max_batch
274
+ assert image_height % 8 == 0 and image_width % 8 == 0  # both dimensions must be multiples of 8
275
+ latent_height = image_height // 8
276
+ latent_width = image_width // 8
277
+ assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape
278
+ assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape
279
+ return (latent_height, latent_width)
280
+
281
+ def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
282
+ min_batch = batch_size if static_batch else self.min_batch
283
+ max_batch = batch_size if static_batch else self.max_batch
284
+ latent_height = image_height // 8
285
+ latent_width = image_width // 8
286
+ min_image_height = image_height if static_shape else self.min_image_shape
287
+ max_image_height = image_height if static_shape else self.max_image_shape
288
+ min_image_width = image_width if static_shape else self.min_image_shape
289
+ max_image_width = image_width if static_shape else self.max_image_shape
290
+ min_latent_height = latent_height if static_shape else self.min_latent_shape
291
+ max_latent_height = latent_height if static_shape else self.max_latent_shape
292
+ min_latent_width = latent_width if static_shape else self.min_latent_shape
293
+ max_latent_width = latent_width if static_shape else self.max_latent_shape
294
+ return (
295
+ min_batch,
296
+ max_batch,
297
+ min_image_height,
298
+ max_image_height,
299
+ min_image_width,
300
+ max_image_width,
301
+ min_latent_height,
302
+ max_latent_height,
303
+ min_latent_width,
304
+ max_latent_width,
305
+ )
306
+
307
+
308
+ def getOnnxPath(model_name, onnx_dir, opt=True):
309
+ return os.path.join(onnx_dir, model_name + (".opt" if opt else "") + ".onnx")
310
+
311
+
312
+ def getEnginePath(model_name, engine_dir):
313
+ return os.path.join(engine_dir, model_name + ".plan")
314
+
315
+
316
+ def build_engines(
317
+ models: dict,
318
+ engine_dir,
319
+ onnx_dir,
320
+ onnx_opset,
321
+ opt_image_height,
322
+ opt_image_width,
323
+ opt_batch_size=1,
324
+ force_engine_rebuild=False,
325
+ static_batch=False,
326
+ static_shape=True,
327
+ enable_preview=False,
328
+ enable_all_tactics=False,
329
+ timing_cache=None,
330
+ max_workspace_size=0,
331
+ ):
332
+ built_engines = {}
333
+ if not os.path.isdir(onnx_dir):
334
+ os.makedirs(onnx_dir)
335
+ if not os.path.isdir(engine_dir):
336
+ os.makedirs(engine_dir)
337
+
338
+ # Export models to ONNX
339
+ for model_name, model_obj in models.items():
340
+ engine_path = getEnginePath(model_name, engine_dir)
341
+ if force_engine_rebuild or not os.path.exists(engine_path):
342
+ logger.warning("Building Engines...")
343
+ logger.warning("Engine build can take a while to complete")
344
+ onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
345
+ onnx_opt_path = getOnnxPath(model_name, onnx_dir)
346
+ if force_engine_rebuild or not os.path.exists(onnx_opt_path):
347
+ if force_engine_rebuild or not os.path.exists(onnx_path):
348
+ logger.warning(f"Exporting model: {onnx_path}")
349
+ model = model_obj.get_model()
350
+ with torch.inference_mode(), torch.autocast("cuda"):
351
+ inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)
352
+ torch.onnx.export(
353
+ model,
354
+ inputs,
355
+ onnx_path,
356
+ export_params=True,
357
+ opset_version=onnx_opset,
358
+ do_constant_folding=True,
359
+ input_names=model_obj.get_input_names(),
360
+ output_names=model_obj.get_output_names(),
361
+ dynamic_axes=model_obj.get_dynamic_axes(),
362
+ )
363
+ del model
364
+ torch.cuda.empty_cache()
365
+ gc.collect()
366
+ else:
367
+ logger.warning(f"Found cached model: {onnx_path}")
368
+
369
+ # Optimize onnx
370
+ if force_engine_rebuild or not os.path.exists(onnx_opt_path):
371
+ logger.warning(f"Generating optimized model: {onnx_opt_path}")
372
+ onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path))
373
+ onnx.save(onnx_opt_graph, onnx_opt_path)
374
+ else:
375
+ logger.warning(f"Found cached optimized model: {onnx_opt_path} ")
376
+
377
+ # Build TensorRT engines
378
+ for model_name, model_obj in models.items():
379
+ engine_path = getEnginePath(model_name, engine_dir)
380
+ engine = Engine(engine_path)
381
+ onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
382
+ onnx_opt_path = getOnnxPath(model_name, onnx_dir)
383
+
384
+ if force_engine_rebuild or not os.path.exists(engine.engine_path):
385
+ engine.build(
386
+ onnx_opt_path,
387
+ fp16=True,
388
+ input_profile=model_obj.get_input_profile(
389
+ opt_batch_size,
390
+ opt_image_height,
391
+ opt_image_width,
392
+ static_batch=static_batch,
393
+ static_shape=static_shape,
394
+ ),
395
+ enable_preview=enable_preview,
396
+ timing_cache=timing_cache,
397
+ workspace_size=max_workspace_size,
398
+ )
399
+ built_engines[model_name] = engine
400
+
401
+ # Load and activate TensorRT engines
402
+ for model_name, model_obj in models.items():
403
+ engine = built_engines[model_name]
404
+ engine.load()
405
+ engine.activate()
406
+
407
+ return built_engines
408
+
409
+
410
+ def runEngine(engine, feed_dict, stream):
411
+ return engine.infer(feed_dict, stream)
412
+
413
+
414
+ class CLIP(BaseModel):
415
+ def __init__(self, model, device, max_batch_size, embedding_dim):
416
+ super(CLIP, self).__init__(
417
+ model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
418
+ )
419
+ self.name = "CLIP"
420
+
421
+ def get_input_names(self):
422
+ return ["input_ids"]
423
+
424
+ def get_output_names(self):
425
+ return ["text_embeddings", "pooler_output"]
426
+
427
+ def get_dynamic_axes(self):
428
+ return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}}
429
+
430
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
431
+ self.check_dims(batch_size, image_height, image_width)
432
+ min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(
433
+ batch_size, image_height, image_width, static_batch, static_shape
434
+ )
435
+ return {
436
+ "input_ids": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)]
437
+ }
438
+
439
+ def get_shape_dict(self, batch_size, image_height, image_width):
440
+ self.check_dims(batch_size, image_height, image_width)
441
+ return {
442
+ "input_ids": (batch_size, self.text_maxlen),
443
+ "text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim),
444
+ }
445
+
446
+ def get_sample_input(self, batch_size, image_height, image_width):
447
+ self.check_dims(batch_size, image_height, image_width)
448
+ return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)
449
+
450
+ def optimize(self, onnx_graph):
451
+ opt = Optimizer(onnx_graph)
452
+ opt.select_outputs([0]) # delete graph output#1
453
+ opt.cleanup()
454
+ opt.fold_constants()
455
+ opt.infer_shapes()
456
+ opt.select_outputs([0], names=["text_embeddings"]) # rename network output
457
+ opt_onnx_graph = opt.cleanup(return_onnx=True)
458
+ return opt_onnx_graph
459
+
460
+
461
+ def make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False):
462
+ return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
463
+
464
+
465
+ class UNet(BaseModel):
466
+ def __init__(
467
+ self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4
468
+ ):
469
+ super(UNet, self).__init__(
470
+ model=model,
471
+ fp16=fp16,
472
+ device=device,
473
+ max_batch_size=max_batch_size,
474
+ embedding_dim=embedding_dim,
475
+ text_maxlen=text_maxlen,
476
+ )
477
+ self.unet_dim = unet_dim
478
+ self.name = "UNet"
479
+
480
+ def get_input_names(self):
481
+ return ["sample", "timestep", "encoder_hidden_states"]
482
+
483
+ def get_output_names(self):
484
+ return ["latent"]
485
+
486
+ def get_dynamic_axes(self):
487
+ return {
488
+ "sample": {0: "2B", 2: "H", 3: "W"},
489
+ "encoder_hidden_states": {0: "2B"},
490
+ "latent": {0: "2B", 2: "H", 3: "W"},
491
+ }
492
+
493
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
494
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
495
+ (
496
+ min_batch,
497
+ max_batch,
498
+ _,
499
+ _,
500
+ _,
501
+ _,
502
+ min_latent_height,
503
+ max_latent_height,
504
+ min_latent_width,
505
+ max_latent_width,
506
+ ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
507
+ return {
508
+ "sample": [
509
+ (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width),
510
+ (2 * batch_size, self.unet_dim, latent_height, latent_width),
511
+ (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width),
512
+ ],
513
+ "encoder_hidden_states": [
514
+ (2 * min_batch, self.text_maxlen, self.embedding_dim),
515
+ (2 * batch_size, self.text_maxlen, self.embedding_dim),
516
+ (2 * max_batch, self.text_maxlen, self.embedding_dim),
517
+ ],
518
+ }
519
+
520
+ def get_shape_dict(self, batch_size, image_height, image_width):
521
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
522
+ return {
523
+ "sample": (2 * batch_size, self.unet_dim, latent_height, latent_width),
524
+ "encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim),
525
+ "latent": (2 * batch_size, 4, latent_height, latent_width),
526
+ }
527
+
528
+ def get_sample_input(self, batch_size, image_height, image_width):
529
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
530
+ dtype = torch.float16 if self.fp16 else torch.float32
531
+ return (
532
+ torch.randn(
533
+ 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device
534
+ ),
535
+ torch.tensor([1.0], dtype=torch.float32, device=self.device),
536
+ torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),
537
+ )
538
+
539
+
540
+ def make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False, unet_dim=4):
541
+ return UNet(
542
+ model,
543
+ fp16=True,
544
+ device=device,
545
+ max_batch_size=max_batch_size,
546
+ embedding_dim=embedding_dim,
547
+ unet_dim=unet_dim,
548
+ )
549
+
550
+
551
+ class VAE(BaseModel):
552
+ def __init__(self, model, device, max_batch_size, embedding_dim):
553
+ super(VAE, self).__init__(
554
+ model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
555
+ )
556
+ self.name = "VAE decoder"
557
+
558
+ def get_input_names(self):
559
+ return ["latent"]
560
+
561
+ def get_output_names(self):
562
+ return ["images"]
563
+
564
+ def get_dynamic_axes(self):
565
+ return {"latent": {0: "B", 2: "H", 3: "W"}, "images": {0: "B", 2: "8H", 3: "8W"}}
566
+
567
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
568
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
569
+ (
570
+ min_batch,
571
+ max_batch,
572
+ _,
573
+ _,
574
+ _,
575
+ _,
576
+ min_latent_height,
577
+ max_latent_height,
578
+ min_latent_width,
579
+ max_latent_width,
580
+ ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
581
+ return {
582
+ "latent": [
583
+ (min_batch, 4, min_latent_height, min_latent_width),
584
+ (batch_size, 4, latent_height, latent_width),
585
+ (max_batch, 4, max_latent_height, max_latent_width),
586
+ ]
587
+ }
588
+
589
+ def get_shape_dict(self, batch_size, image_height, image_width):
590
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
591
+ return {
592
+ "latent": (batch_size, 4, latent_height, latent_width),
593
+ "images": (batch_size, 3, image_height, image_width),
594
+ }
595
+
596
+ def get_sample_input(self, batch_size, image_height, image_width):
597
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
598
+ return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device)
599
+
600
+
601
+ def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):
602
+ return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
603
+
604
+
605
+ class TorchVAEEncoder(torch.nn.Module):
606
+ def __init__(self, model):
607
+ super().__init__()
608
+ self.vae_encoder = model
609
+
610
+ def forward(self, x):
611
+ return self.vae_encoder.encode(x).latent_dist.sample()
612
+
613
+
614
+ class VAEEncoder(BaseModel):
615
+ def __init__(self, model, device, max_batch_size, embedding_dim):
616
+ super(VAEEncoder, self).__init__(
617
+ model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
618
+ )
619
+ self.name = "VAE encoder"
620
+
621
+ def get_model(self):
622
+ vae_encoder = TorchVAEEncoder(self.model)
623
+ return vae_encoder
624
+
625
+ def get_input_names(self):
626
+ return ["images"]
627
+
628
+ def get_output_names(self):
629
+ return ["latent"]
630
+
631
+ def get_dynamic_axes(self):
632
+ return {"images": {0: "B", 2: "8H", 3: "8W"}, "latent": {0: "B", 2: "H", 3: "W"}}
633
+
634
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
635
+ assert batch_size >= self.min_batch and batch_size <= self.max_batch
636
+ min_batch = batch_size if static_batch else self.min_batch
637
+ max_batch = batch_size if static_batch else self.max_batch
638
+ self.check_dims(batch_size, image_height, image_width)
639
+ (
640
+ min_batch,
641
+ max_batch,
642
+ min_image_height,
643
+ max_image_height,
644
+ min_image_width,
645
+ max_image_width,
646
+ _,
647
+ _,
648
+ _,
649
+ _,
650
+ ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
651
+
652
+ return {
653
+ "images": [
654
+ (min_batch, 3, min_image_height, min_image_width),
655
+ (batch_size, 3, image_height, image_width),
656
+ (max_batch, 3, max_image_height, max_image_width),
657
+ ]
658
+ }
659
+
660
+ def get_shape_dict(self, batch_size, image_height, image_width):
661
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
662
+ return {
663
+ "images": (batch_size, 3, image_height, image_width),
664
+ "latent": (batch_size, 4, latent_height, latent_width),
665
+ }
666
+
667
+ def get_sample_input(self, batch_size, image_height, image_width):
668
+ self.check_dims(batch_size, image_height, image_width)
669
+ return torch.randn(batch_size, 3, image_height, image_width, dtype=torch.float32, device=self.device)
670
+
671
+
672
+ def make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False):
673
+ return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
674
+
675
+
676
+ class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
677
+ r"""
678
+ Pipeline for inpainting using TensorRT accelerated Stable Diffusion.
679
+
680
+ This model inherits from [`StableDiffusionInpaintPipeline`]. Check the superclass documentation for the generic methods the
681
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
682
+
683
+ Args:
684
+ vae ([`AutoencoderKL`]):
685
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
686
+ text_encoder ([`CLIPTextModel`]):
687
+ Frozen text-encoder. Stable Diffusion uses the text portion of
688
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
689
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
690
+ tokenizer (`CLIPTokenizer`):
691
+ Tokenizer of class
692
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
693
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
694
+ scheduler ([`SchedulerMixin`]):
695
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
696
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
697
+ safety_checker ([`StableDiffusionSafetyChecker`]):
698
+ Classification module that estimates whether generated images could be considered offensive or harmful.
699
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
700
+ feature_extractor ([`CLIPFeatureExtractor`]):
701
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
702
+ """
703
+
704
+ def __init__(
705
+ self,
706
+ vae: AutoencoderKL,
707
+ text_encoder: CLIPTextModel,
708
+ tokenizer: CLIPTokenizer,
709
+ unet: UNet2DConditionModel,
710
+ scheduler: DDIMScheduler,
711
+ safety_checker: StableDiffusionSafetyChecker,
712
+ feature_extractor: CLIPFeatureExtractor,
713
+ requires_safety_checker: bool = True,
714
+ stages=["clip", "unet", "vae", "vae_encoder"],
715
+ image_height: int = 512,
716
+ image_width: int = 512,
717
+ max_batch_size: int = 16,
718
+ # ONNX export parameters
719
+ onnx_opset: int = 17,
720
+ onnx_dir: str = "onnx",
721
+ # TensorRT engine build parameters
722
+ engine_dir: str = "engine",
723
+ build_preview_features: bool = True,
724
+ force_engine_rebuild: bool = False,
725
+ timing_cache: str = "timing_cache",
726
+ ):
727
+ super().__init__(
728
+ vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker
729
+ )
730
+
731
+ self.vae.forward = self.vae.decode
732
+
733
+ self.stages = stages
734
+ self.image_height, self.image_width = image_height, image_width
735
+ self.inpaint = True
736
+ self.onnx_opset = onnx_opset
737
+ self.onnx_dir = onnx_dir
738
+ self.engine_dir = engine_dir
739
+ self.force_engine_rebuild = force_engine_rebuild
740
+ self.timing_cache = timing_cache
741
+ self.build_static_batch = False
742
+ self.build_dynamic_shape = False
743
+ self.build_preview_features = build_preview_features
744
+
745
+ self.max_batch_size = max_batch_size
746
+ # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.
747
+ if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512:
748
+ self.max_batch_size = 4
749
+
750
+ self.stream = None # loaded in loadResources()
751
+ self.models = {} # loaded in __loadModels()
752
+ self.engine = {} # loaded in build_engines()
753
+
754
+ def __loadModels(self):
755
+ # Load pipeline models
756
+ self.embedding_dim = self.text_encoder.config.hidden_size
757
+ models_args = {
758
+ "device": self.torch_device,
759
+ "max_batch_size": self.max_batch_size,
760
+ "embedding_dim": self.embedding_dim,
761
+ "inpaint": self.inpaint,
762
+ }
763
+ if "clip" in self.stages:
764
+ self.models["clip"] = make_CLIP(self.text_encoder, **models_args)
765
+ if "unet" in self.stages:
766
+ self.models["unet"] = make_UNet(self.unet, **models_args, unet_dim=self.unet.config.in_channels)
767
+ if "vae" in self.stages:
768
+ self.models["vae"] = make_VAE(self.vae, **models_args)
769
+ if "vae_encoder" in self.stages:
770
+ self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args)
771
+
772
+ @classmethod
773
+ def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
774
+ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
775
+ resume_download = kwargs.pop("resume_download", False)
776
+ proxies = kwargs.pop("proxies", None)
777
+ local_files_only = kwargs.pop("local_files_only", False)
778
+ use_auth_token = kwargs.pop("use_auth_token", None)
779
+ revision = kwargs.pop("revision", None)
780
+
781
+ cls.cached_folder = (
782
+ pretrained_model_name_or_path
783
+ if os.path.isdir(pretrained_model_name_or_path)
784
+ else snapshot_download(
785
+ pretrained_model_name_or_path,
786
+ cache_dir=cache_dir,
787
+ resume_download=resume_download,
788
+ proxies=proxies,
789
+ local_files_only=local_files_only,
790
+ use_auth_token=use_auth_token,
791
+ revision=revision,
792
+ )
793
+ )
794
+
795
+ def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False):
796
+ super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings)
797
+
798
+ self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir)
799
+ self.engine_dir = os.path.join(self.cached_folder, self.engine_dir)
800
+ self.timing_cache = os.path.join(self.cached_folder, self.timing_cache)
801
+
802
+ # set device
803
+ self.torch_device = self._execution_device
804
+ logger.warning(f"Running inference on device: {self.torch_device}")
805
+
806
+ # load models
807
+ self.__loadModels()
808
+
809
+ # build engines
810
+ self.engine = build_engines(
811
+ self.models,
812
+ self.engine_dir,
813
+ self.onnx_dir,
814
+ self.onnx_opset,
815
+ opt_image_height=self.image_height,
816
+ opt_image_width=self.image_width,
817
+ force_engine_rebuild=self.force_engine_rebuild,
818
+ static_batch=self.build_static_batch,
819
+ static_shape=not self.build_dynamic_shape,
820
+ enable_preview=self.build_preview_features,
821
+ timing_cache=self.timing_cache,
822
+ )
823
+
824
+ return self
825
+
826
+ def __initialize_timesteps(self, num_inference_steps, strength):
827
+ self.scheduler.set_timesteps(num_inference_steps)
828
+ offset = self.scheduler.config.steps_offset if hasattr(self.scheduler, "steps_offset") else 0
829
+ init_timestep = int(num_inference_steps * strength) + offset
830
+ init_timestep = min(init_timestep, num_inference_steps)
831
+ t_start = max(num_inference_steps - init_timestep + offset, 0)
832
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :].to(self.torch_device)
833
+ return timesteps, num_inference_steps - t_start
834
+
835
+ def __preprocess_images(self, batch_size, images=()):
836
+ init_images = []
837
+ for image in images:
838
+ image = image.to(self.torch_device).float()
839
+ image = image.repeat(batch_size, 1, 1, 1)
840
+ init_images.append(image)
841
+ return tuple(init_images)
842
+
843
+ def __encode_image(self, init_image):
844
+ init_latents = runEngine(self.engine["vae_encoder"], {"images": device_view(init_image)}, self.stream)[
845
+ "latent"
846
+ ]
847
+ init_latents = 0.18215 * init_latents
848
+ return init_latents
849
+
850
+ def __encode_prompt(self, prompt, negative_prompt):
851
+ r"""
852
+ Encodes the prompt into text encoder hidden states.
853
+
854
+ Args:
855
+ prompt (`str` or `List[str]`, *optional*):
856
+ prompt to be encoded
857
+ negative_prompt (`str` or `List[str]`, *optional*):
858
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
859
+ `negative_prompt_embeds` instead.
860
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
861
+ """
862
+ # Tokenize prompt
863
+ text_input_ids = (
864
+ self.tokenizer(
865
+ prompt,
866
+ padding="max_length",
867
+ max_length=self.tokenizer.model_max_length,
868
+ truncation=True,
869
+ return_tensors="pt",
870
+ )
871
+ .input_ids.type(torch.int32)
872
+ .to(self.torch_device)
873
+ )
874
+
875
+ text_input_ids_inp = device_view(text_input_ids)
876
+ # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt
877
+ text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids_inp}, self.stream)[
878
+ "text_embeddings"
879
+ ].clone()
880
+
881
+ # Tokenize negative prompt
882
+ uncond_input_ids = (
883
+ self.tokenizer(
884
+ negative_prompt,
885
+ padding="max_length",
886
+ max_length=self.tokenizer.model_max_length,
887
+ truncation=True,
888
+ return_tensors="pt",
889
+ )
890
+ .input_ids.type(torch.int32)
891
+ .to(self.torch_device)
892
+ )
893
+ uncond_input_ids_inp = device_view(uncond_input_ids)
894
+ uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[
895
+ "text_embeddings"
896
+ ]
897
+
898
+ # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance
899
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16)
900
+
901
+ return text_embeddings
902
+
903
+ def __denoise_latent(
904
+ self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None
905
+ ):
906
+ if not isinstance(timesteps, torch.Tensor):
907
+ timesteps = self.scheduler.timesteps
908
+ for step_index, timestep in enumerate(timesteps):
909
+ # Expand the latents if we are doing classifier free guidance
910
+ latent_model_input = torch.cat([latents] * 2)
911
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)
912
+ if isinstance(mask, torch.Tensor):
913
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
914
+
915
+ # Predict the noise residual
916
+ timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep
917
+
918
+ sample_inp = device_view(latent_model_input)
919
+ timestep_inp = device_view(timestep_float)
920
+ embeddings_inp = device_view(text_embeddings)
921
+ noise_pred = runEngine(
922
+ self.engine["unet"],
923
+ {"sample": sample_inp, "timestep": timestep_inp, "encoder_hidden_states": embeddings_inp},
924
+ self.stream,
925
+ )["latent"]
926
+
927
+ # Perform guidance
928
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
929
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
930
+
931
+ latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample
932
+
933
+ latents = 1.0 / 0.18215 * latents
934
+ return latents
935
+
936
+ def __decode_latent(self, latents):
937
+ images = runEngine(self.engine["vae"], {"latent": device_view(latents)}, self.stream)["images"]
938
+ images = (images / 2 + 0.5).clamp(0, 1)
939
+ return images.cpu().permute(0, 2, 3, 1).float().numpy()
940
+
941
+ def __loadResources(self, image_height, image_width, batch_size):
942
+ self.stream = cuda.Stream()
943
+
944
+ # Allocate buffers for TensorRT engine bindings
945
+ for model_name, obj in self.models.items():
946
+ self.engine[model_name].allocate_buffers(
947
+ shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device
948
+ )
949
+
950
+ @torch.no_grad()
951
+ def __call__(
952
+ self,
953
+ prompt: Union[str, List[str]] = None,
954
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
955
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
956
+ strength: float = 1.0,
957
+ num_inference_steps: int = 50,
958
+ guidance_scale: float = 7.5,
959
+ negative_prompt: Optional[Union[str, List[str]]] = None,
960
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
961
+ ):
962
+ r"""
963
+ Function invoked when calling the pipeline for generation.
964
+
965
+ Args:
966
+ prompt (`str` or `List[str]`, *optional*):
967
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
968
+ instead.
969
+ image (`PIL.Image.Image`):
970
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
971
+ be masked out with `mask_image` and repainted according to `prompt`.
972
+ mask_image (`PIL.Image.Image`):
973
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
974
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
975
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
976
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
977
+ strength (`float`, *optional*, defaults to 1.0):
978
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
979
+ will be used as a starting point, adding more noise to it the larger the `strength`. The number of
980
+ denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
981
+ be maximum and the denoising process will run for the full number of iterations specified in
982
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
983
+ num_inference_steps (`int`, *optional*, defaults to 50):
984
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
985
+ expense of slower inference.
986
+ guidance_scale (`float`, *optional*, defaults to 7.5):
987
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
988
+ `guidance_scale` is defined as `w` of equation 2 of [Imagen
989
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
990
+ 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
991
+ usually at the expense of lower image quality.
992
+ negative_prompt (`str` or `List[str]`, *optional*):
993
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
994
+ `negative_prompt_embeds` instead.
995
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
996
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
997
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
998
+ to make generation deterministic.
999
+
1000
+ """
1001
+ self.generator = generator
1002
+ self.denoising_steps = num_inference_steps
1003
+ self.guidance_scale = guidance_scale
1004
+
1005
+ # Pre-compute latent input scales and linear multistep coefficients
1006
+ self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device)
1007
+
1008
+ # Define call parameters
1009
+ if prompt is not None and isinstance(prompt, str):
1010
+ batch_size = 1
1011
+ prompt = [prompt]
1012
+ elif prompt is not None and isinstance(prompt, list):
1013
+ batch_size = len(prompt)
1014
+ else:
1015
+ raise ValueError(f"Expected prompt to be of type list or str but got {type(prompt)}")
1016
+
1017
+ if negative_prompt is None:
1018
+ negative_prompt = [""] * batch_size
1019
+
1020
+ if negative_prompt is not None and isinstance(negative_prompt, str):
1021
+ negative_prompt = [negative_prompt]
1022
+
1023
+ assert len(prompt) == len(negative_prompt)
1024
+
1025
+ if batch_size > self.max_batch_size:
1026
+ raise ValueError(
1027
+ f"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. If dynamic shape is used, then maximum batch size is 4"
1028
+ )
1029
+
1030
+ # Validate image dimensions
1031
+ mask_width, mask_height = mask_image.size
1032
+ if mask_height != self.image_height or mask_width != self.image_width:
1033
+ raise ValueError(
1034
+ f"Input image height and width {self.image_height} and {self.image_width} are not equal to "
1035
+ f"the respective dimensions of the mask image {mask_height} and {mask_width}"
1036
+ )
1037
+
1038
+ # load resources
1039
+ self.__loadResources(self.image_height, self.image_width, batch_size)
1040
+
1041
+ with torch.inference_mode(), torch.autocast("cuda"), trt.Runtime(TRT_LOGGER):
1042
+ # Spatial dimensions of latent tensor
1043
+ latent_height = self.image_height // 8
1044
+ latent_width = self.image_width // 8
1045
+
1046
+ # Pre-process input images
1047
+ mask, masked_image, init_image = self.__preprocess_images(
1048
+ batch_size,
1049
+ prepare_mask_and_masked_image(
1050
+ image,
1051
+ mask_image,
1052
+ self.image_height,
1053
+ self.image_width,
1054
+ return_image=True,
1055
+ ),
1056
+ )
1057
+
1058
+ mask = torch.nn.functional.interpolate(mask, size=(latent_height, latent_width))
1059
+ mask = torch.cat([mask] * 2)
1060
+
1061
+ # Initialize timesteps
1062
+ timesteps, t_start = self.__initialize_timesteps(self.denoising_steps, strength)
1063
+
1064
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
1065
+ latent_timestep = timesteps[:1].repeat(batch_size)
1066
+ # Create a boolean to check if strength is 1.0; if so, initialize the latents with pure noise
1067
+ is_strength_max = strength == 1.0
1068
+
1069
+ # Pre-initialize latents
1070
+ num_channels_latents = self.vae.config.latent_channels
1071
+ latents_outputs = self.prepare_latents(
1072
+ batch_size,
1073
+ num_channels_latents,
1074
+ self.image_height,
1075
+ self.image_width,
1076
+ torch.float32,
1077
+ self.torch_device,
1078
+ generator,
1079
+ image=init_image,
1080
+ timestep=latent_timestep,
1081
+ is_strength_max=is_strength_max,
1082
+ )
1083
+
1084
+ latents = latents_outputs[0]
1085
+
1086
+ # VAE encode masked image
1087
+ masked_latents = self.__encode_image(masked_image)
1088
+ masked_latents = torch.cat([masked_latents] * 2)
1089
+
1090
+ # CLIP text encoder
1091
+ text_embeddings = self.__encode_prompt(prompt, negative_prompt)
1092
+
1093
+ # UNet denoiser
1094
+ latents = self.__denoise_latent(
1095
+ latents,
1096
+ text_embeddings,
1097
+ timesteps=timesteps,
1098
+ step_offset=t_start,
1099
+ mask=mask,
1100
+ masked_image_latents=masked_latents,
1101
+ )
1102
+
1103
+ # VAE decode latent
1104
+ images = self.__decode_latent(latents)
1105
+
1106
+ images = self.numpy_to_pil(images)
1107
+ return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=None)
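A minimal usage sketch for the inpainting pipeline above, assuming it is exposed as the `stable_diffusion_tensorrt_inpaint` community pipeline and run on a machine with a TensorRT-capable GPU; the checkpoint name and image URLs are placeholders:

import torch
from diffusers import DDIMScheduler, DiffusionPipeline
from diffusers.utils import load_image

model_id = "stabilityai/stable-diffusion-2-inpainting"  # illustrative checkpoint
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")

# custom_pipeline resolves to the community file defined above
pipe = DiffusionPipeline.from_pretrained(
    model_id,
    custom_pipeline="stable_diffusion_tensorrt_inpaint",
    torch_dtype=torch.float16,
    scheduler=scheduler,
)
pipe.set_cached_folder(model_id)  # folder that will hold the ONNX exports and TensorRT engines
pipe = pipe.to("cuda")            # exports the models to ONNX and builds the engines

# Placeholder inputs; both must match the pipeline's configured 512x512 resolution
init_image = load_image("https://example.com/input.png").resize((512, 512))
mask_image = load_image("https://example.com/mask.png").resize((512, 512))

output = pipe(
    prompt="a yellow cat sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    strength=0.75,
    num_inference_steps=50,
    guidance_scale=7.5,
)
output.images[0].save("inpainted.png")

The engine build happens once inside `to()`; the first run is slow, but later generations reuse the cached `.plan` files in `engine_dir`.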
v0.24.0/stable_diffusion_tensorrt_txt2img.py ADDED
@@ -0,0 +1,928 @@
1
+ #
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ # SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ import gc
19
+ import os
20
+ from collections import OrderedDict
21
+ from copy import copy
22
+ from typing import List, Optional, Union
23
+
24
+ import numpy as np
25
+ import onnx
26
+ import onnx_graphsurgeon as gs
27
+ import tensorrt as trt
28
+ import torch
29
+ from huggingface_hub import snapshot_download
30
+ from onnx import shape_inference
31
+ from polygraphy import cuda
32
+ from polygraphy.backend.common import bytes_from_path
33
+ from polygraphy.backend.onnx.loader import fold_constants
34
+ from polygraphy.backend.trt import (
35
+ CreateConfig,
36
+ Profile,
37
+ engine_from_bytes,
38
+ engine_from_network,
39
+ network_from_onnx_path,
40
+ save_engine,
41
+ )
42
+ from polygraphy.backend.trt import util as trt_util
43
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
44
+
45
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
46
+ from diffusers.pipelines.stable_diffusion import (
47
+ StableDiffusionPipeline,
48
+ StableDiffusionPipelineOutput,
49
+ StableDiffusionSafetyChecker,
50
+ )
51
+ from diffusers.schedulers import DDIMScheduler
52
+ from diffusers.utils import DIFFUSERS_CACHE, logging
53
+
54
+
55
+ """
56
+ Installation instructions
57
+ python3 -m pip install --upgrade transformers diffusers>=0.16.0
58
+ python3 -m pip install --upgrade tensorrt>=8.6.1
59
+ python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
60
+ python3 -m pip install onnxruntime
61
+ """
62
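+ # Example usage (a minimal sketch; the checkpoint name below is illustrative and this assumes the
+ # module is exposed as the "stable_diffusion_tensorrt_txt2img" community pipeline on a TensorRT-capable GPU):
+ #
+ #     import torch
+ #     from diffusers import DDIMScheduler, DiffusionPipeline
+ #
+ #     model_id = "stabilityai/stable-diffusion-2-1"
+ #     scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
+ #     pipe = DiffusionPipeline.from_pretrained(
+ #         model_id,
+ #         custom_pipeline="stable_diffusion_tensorrt_txt2img",
+ #         torch_dtype=torch.float16,
+ #         scheduler=scheduler,
+ #     )
+ #     pipe.set_cached_folder(model_id)  # folder that will hold the ONNX exports and TensorRT engines
+ #     pipe = pipe.to("cuda")            # exports the models to ONNX and builds the engines
+ #     image = pipe("a photo of an astronaut riding a horse on mars").images[0]
+ #     image.save("astronaut.png")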
+
63
+ TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
64
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
65
+
66
+ # Map of numpy dtype -> torch dtype
67
+ numpy_to_torch_dtype_dict = {
68
+ np.uint8: torch.uint8,
69
+ np.int8: torch.int8,
70
+ np.int16: torch.int16,
71
+ np.int32: torch.int32,
72
+ np.int64: torch.int64,
73
+ np.float16: torch.float16,
74
+ np.float32: torch.float32,
75
+ np.float64: torch.float64,
76
+ np.complex64: torch.complex64,
77
+ np.complex128: torch.complex128,
78
+ }
79
+ if np.version.full_version >= "1.24.0":
80
+ numpy_to_torch_dtype_dict[np.bool_] = torch.bool
81
+ else:
82
+ numpy_to_torch_dtype_dict[np.bool] = torch.bool
83
+
84
+ # Map of torch dtype -> numpy dtype
85
+ torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}
86
+
87
+
88
+ def device_view(t):
89
+ return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])
90
+
91
+
92
+ class Engine:
93
+ def __init__(self, engine_path):
94
+ self.engine_path = engine_path
95
+ self.engine = None
96
+ self.context = None
97
+ self.buffers = OrderedDict()
98
+ self.tensors = OrderedDict()
99
+
100
+ def __del__(self):
101
+ [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]
102
+ del self.engine
103
+ del self.context
104
+ del self.buffers
105
+ del self.tensors
106
+
107
+ def build(
108
+ self,
109
+ onnx_path,
110
+ fp16,
111
+ input_profile=None,
112
+ enable_preview=False,
113
+ enable_all_tactics=False,
114
+ timing_cache=None,
115
+ workspace_size=0,
116
+ ):
117
+ logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
118
+ p = Profile()
119
+ if input_profile:
120
+ for name, dims in input_profile.items():
121
+ assert len(dims) == 3
122
+ p.add(name, min=dims[0], opt=dims[1], max=dims[2])
123
+
124
+ config_kwargs = {}
125
+
126
+ config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]
127
+ if enable_preview:
128
+ # Faster dynamic shapes made optional since it increases engine build time.
129
+ config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)
130
+ if workspace_size > 0:
131
+ config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
132
+ if not enable_all_tactics:
133
+ config_kwargs["tactic_sources"] = []
134
+
135
+ engine = engine_from_network(
136
+ network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
137
+ config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),
138
+ save_timing_cache=timing_cache,
139
+ )
140
+ save_engine(engine, path=self.engine_path)
141
+
142
+ def load(self):
143
+ logger.warning(f"Loading TensorRT engine: {self.engine_path}")
144
+ self.engine = engine_from_bytes(bytes_from_path(self.engine_path))
145
+
146
+ def activate(self):
147
+ self.context = self.engine.create_execution_context()
148
+
149
+ def allocate_buffers(self, shape_dict=None, device="cuda"):
150
+ for idx in range(trt_util.get_bindings_per_profile(self.engine)):
151
+ binding = self.engine[idx]
152
+ if shape_dict and binding in shape_dict:
153
+ shape = shape_dict[binding]
154
+ else:
155
+ shape = self.engine.get_binding_shape(binding)
156
+ dtype = trt.nptype(self.engine.get_binding_dtype(binding))
157
+ if self.engine.binding_is_input(binding):
158
+ self.context.set_binding_shape(idx, shape)
159
+ tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
160
+ self.tensors[binding] = tensor
161
+ self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)
162
+
163
+ def infer(self, feed_dict, stream):
164
+ start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
165
+ # shallow copy of ordered dict
166
+ device_buffers = copy(self.buffers)
167
+ for name, buf in feed_dict.items():
168
+ assert isinstance(buf, cuda.DeviceView)
169
+ device_buffers[name] = buf
170
+ bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]
171
+ noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)
172
+ if not noerror:
173
+ raise ValueError("ERROR: inference failed.")
174
+
175
+ return self.tensors
176
+
177
+
178
+ class Optimizer:
179
+ def __init__(self, onnx_graph):
180
+ self.graph = gs.import_onnx(onnx_graph)
181
+
182
+ def cleanup(self, return_onnx=False):
183
+ self.graph.cleanup().toposort()
184
+ if return_onnx:
185
+ return gs.export_onnx(self.graph)
186
+
187
+ def select_outputs(self, keep, names=None):
188
+ self.graph.outputs = [self.graph.outputs[o] for o in keep]
189
+ if names:
190
+ for i, name in enumerate(names):
191
+ self.graph.outputs[i].name = name
192
+
193
+ def fold_constants(self, return_onnx=False):
194
+ onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)
195
+ self.graph = gs.import_onnx(onnx_graph)
196
+ if return_onnx:
197
+ return onnx_graph
198
+
199
+ def infer_shapes(self, return_onnx=False):
200
+ onnx_graph = gs.export_onnx(self.graph)
201
+ if onnx_graph.ByteSize() > 2147483648:
202
+ raise TypeError("ERROR: model size exceeds supported 2GB limit")
203
+ else:
204
+ onnx_graph = shape_inference.infer_shapes(onnx_graph)
205
+
206
+ self.graph = gs.import_onnx(onnx_graph)
207
+ if return_onnx:
208
+ return onnx_graph
209
+
210
+
211
+ class BaseModel:
212
+ def __init__(self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77):
213
+ self.model = model
214
+ self.name = "SD Model"
215
+ self.fp16 = fp16
216
+ self.device = device
217
+
218
+ self.min_batch = 1
219
+ self.max_batch = max_batch_size
220
+ self.min_image_shape = 256 # min image resolution: 256x256
221
+ self.max_image_shape = 1024 # max image resolution: 1024x1024
222
+ self.min_latent_shape = self.min_image_shape // 8
223
+ self.max_latent_shape = self.max_image_shape // 8
224
+
225
+ self.embedding_dim = embedding_dim
226
+ self.text_maxlen = text_maxlen
227
+
228
+ def get_model(self):
229
+ return self.model
230
+
231
+ def get_input_names(self):
232
+ pass
233
+
234
+ def get_output_names(self):
235
+ pass
236
+
237
+ def get_dynamic_axes(self):
238
+ return None
239
+
240
+ def get_sample_input(self, batch_size, image_height, image_width):
241
+ pass
242
+
243
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
244
+ return None
245
+
246
+ def get_shape_dict(self, batch_size, image_height, image_width):
247
+ return None
248
+
249
+ def optimize(self, onnx_graph):
250
+ opt = Optimizer(onnx_graph)
251
+ opt.cleanup()
252
+ opt.fold_constants()
253
+ opt.infer_shapes()
254
+ onnx_opt_graph = opt.cleanup(return_onnx=True)
255
+ return onnx_opt_graph
256
+
257
+ def check_dims(self, batch_size, image_height, image_width):
258
+ assert batch_size >= self.min_batch and batch_size <= self.max_batch
259
+ assert image_height % 8 == 0 and image_width % 8 == 0
260
+ latent_height = image_height // 8
261
+ latent_width = image_width // 8
262
+ assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape
263
+ assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape
264
+ return (latent_height, latent_width)
265
+
266
+ def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
267
+ min_batch = batch_size if static_batch else self.min_batch
268
+ max_batch = batch_size if static_batch else self.max_batch
269
+ latent_height = image_height // 8
270
+ latent_width = image_width // 8
271
+ min_image_height = image_height if static_shape else self.min_image_shape
272
+ max_image_height = image_height if static_shape else self.max_image_shape
273
+ min_image_width = image_width if static_shape else self.min_image_shape
274
+ max_image_width = image_width if static_shape else self.max_image_shape
275
+ min_latent_height = latent_height if static_shape else self.min_latent_shape
276
+ max_latent_height = latent_height if static_shape else self.max_latent_shape
277
+ min_latent_width = latent_width if static_shape else self.min_latent_shape
278
+ max_latent_width = latent_width if static_shape else self.max_latent_shape
279
+ return (
280
+ min_batch,
281
+ max_batch,
282
+ min_image_height,
283
+ max_image_height,
284
+ min_image_width,
285
+ max_image_width,
286
+ min_latent_height,
287
+ max_latent_height,
288
+ min_latent_width,
289
+ max_latent_width,
290
+ )
291
+
292
+
293
+ def getOnnxPath(model_name, onnx_dir, opt=True):
294
+ return os.path.join(onnx_dir, model_name + (".opt" if opt else "") + ".onnx")
295
+
296
+
297
+ def getEnginePath(model_name, engine_dir):
298
+ return os.path.join(engine_dir, model_name + ".plan")
299
+
300
+
301
+ def build_engines(
302
+ models: dict,
303
+ engine_dir,
304
+ onnx_dir,
305
+ onnx_opset,
306
+ opt_image_height,
307
+ opt_image_width,
308
+ opt_batch_size=1,
309
+ force_engine_rebuild=False,
310
+ static_batch=False,
311
+ static_shape=True,
312
+ enable_preview=False,
313
+ enable_all_tactics=False,
314
+ timing_cache=None,
315
+ max_workspace_size=0,
316
+ ):
317
+ built_engines = {}
318
+ if not os.path.isdir(onnx_dir):
319
+ os.makedirs(onnx_dir)
320
+ if not os.path.isdir(engine_dir):
321
+ os.makedirs(engine_dir)
322
+
323
+ # Export models to ONNX
324
+ for model_name, model_obj in models.items():
325
+ engine_path = getEnginePath(model_name, engine_dir)
326
+ if force_engine_rebuild or not os.path.exists(engine_path):
327
+ logger.warning("Building Engines...")
328
+ logger.warning("Engine build can take a while to complete")
329
+ onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
330
+ onnx_opt_path = getOnnxPath(model_name, onnx_dir)
331
+ if force_engine_rebuild or not os.path.exists(onnx_opt_path):
332
+ if force_engine_rebuild or not os.path.exists(onnx_path):
333
+ logger.warning(f"Exporting model: {onnx_path}")
334
+ model = model_obj.get_model()
335
+ with torch.inference_mode(), torch.autocast("cuda"):
336
+ inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)
337
+ torch.onnx.export(
338
+ model,
339
+ inputs,
340
+ onnx_path,
341
+ export_params=True,
342
+ opset_version=onnx_opset,
343
+ do_constant_folding=True,
344
+ input_names=model_obj.get_input_names(),
345
+ output_names=model_obj.get_output_names(),
346
+ dynamic_axes=model_obj.get_dynamic_axes(),
347
+ )
348
+ del model
349
+ torch.cuda.empty_cache()
350
+ gc.collect()
351
+ else:
352
+ logger.warning(f"Found cached model: {onnx_path}")
353
+
354
+ # Optimize onnx
355
+ if force_engine_rebuild or not os.path.exists(onnx_opt_path):
356
+ logger.warning(f"Generating optimized model: {onnx_opt_path}")
357
+ onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path))
358
+ onnx.save(onnx_opt_graph, onnx_opt_path)
359
+ else:
360
+ logger.warning(f"Found cached optimized model: {onnx_opt_path}")
361
+
362
+ # Build TensorRT engines
363
+ for model_name, model_obj in models.items():
364
+ engine_path = getEnginePath(model_name, engine_dir)
365
+ engine = Engine(engine_path)
366
+ onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
367
+ onnx_opt_path = getOnnxPath(model_name, onnx_dir)
368
+
369
+ if force_engine_rebuild or not os.path.exists(engine.engine_path):
370
+ engine.build(
371
+ onnx_opt_path,
372
+ fp16=True,
373
+ input_profile=model_obj.get_input_profile(
374
+ opt_batch_size,
375
+ opt_image_height,
376
+ opt_image_width,
377
+ static_batch=static_batch,
378
+ static_shape=static_shape,
379
+ ),
380
+ enable_preview=enable_preview,
381
+ timing_cache=timing_cache,
382
+ workspace_size=max_workspace_size,
383
+ )
384
+ built_engines[model_name] = engine
385
+
386
+ # Load and activate TensorRT engines
387
+ for model_name, model_obj in models.items():
388
+ engine = built_engines[model_name]
389
+ engine.load()
390
+ engine.activate()
391
+
392
+ return built_engines
393
+
394
+
395
+ def runEngine(engine, feed_dict, stream):
396
+ return engine.infer(feed_dict, stream)
397
+
398
+
399
+ class CLIP(BaseModel):
400
+ def __init__(self, model, device, max_batch_size, embedding_dim):
401
+ super(CLIP, self).__init__(
402
+ model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
403
+ )
404
+ self.name = "CLIP"
405
+
406
+ def get_input_names(self):
407
+ return ["input_ids"]
408
+
409
+ def get_output_names(self):
410
+ return ["text_embeddings", "pooler_output"]
411
+
412
+ def get_dynamic_axes(self):
413
+ return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}}
414
+
415
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
416
+ self.check_dims(batch_size, image_height, image_width)
417
+ min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(
418
+ batch_size, image_height, image_width, static_batch, static_shape
419
+ )
420
+ return {
421
+ "input_ids": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)]
422
+ }
423
+
424
+ def get_shape_dict(self, batch_size, image_height, image_width):
425
+ self.check_dims(batch_size, image_height, image_width)
426
+ return {
427
+ "input_ids": (batch_size, self.text_maxlen),
428
+ "text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim),
429
+ }
430
+
431
+ def get_sample_input(self, batch_size, image_height, image_width):
432
+ self.check_dims(batch_size, image_height, image_width)
433
+ return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)
434
+
435
+ def optimize(self, onnx_graph):
436
+ opt = Optimizer(onnx_graph)
437
+ opt.select_outputs([0]) # delete graph output#1
438
+ opt.cleanup()
439
+ opt.fold_constants()
440
+ opt.infer_shapes()
441
+ opt.select_outputs([0], names=["text_embeddings"]) # rename network output
442
+ opt_onnx_graph = opt.cleanup(return_onnx=True)
443
+ return opt_onnx_graph
444
+
445
+
446
+ def make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False):
447
+ return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
448
+
449
+
450
+ class UNet(BaseModel):
451
+ def __init__(
452
+ self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4
453
+ ):
454
+ super(UNet, self).__init__(
455
+ model=model,
456
+ fp16=fp16,
457
+ device=device,
458
+ max_batch_size=max_batch_size,
459
+ embedding_dim=embedding_dim,
460
+ text_maxlen=text_maxlen,
461
+ )
462
+ self.unet_dim = unet_dim
463
+ self.name = "UNet"
464
+
465
+ def get_input_names(self):
466
+ return ["sample", "timestep", "encoder_hidden_states"]
467
+
468
+ def get_output_names(self):
469
+ return ["latent"]
470
+
471
+ def get_dynamic_axes(self):
472
+ return {
473
+ "sample": {0: "2B", 2: "H", 3: "W"},
474
+ "encoder_hidden_states": {0: "2B"},
475
+ "latent": {0: "2B", 2: "H", 3: "W"},
476
+ }
477
+
478
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
479
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
480
+ (
481
+ min_batch,
482
+ max_batch,
483
+ _,
484
+ _,
485
+ _,
486
+ _,
487
+ min_latent_height,
488
+ max_latent_height,
489
+ min_latent_width,
490
+ max_latent_width,
491
+ ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
492
+ return {
493
+ "sample": [
494
+ (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width),
495
+ (2 * batch_size, self.unet_dim, latent_height, latent_width),
496
+ (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width),
497
+ ],
498
+ "encoder_hidden_states": [
499
+ (2 * min_batch, self.text_maxlen, self.embedding_dim),
500
+ (2 * batch_size, self.text_maxlen, self.embedding_dim),
501
+ (2 * max_batch, self.text_maxlen, self.embedding_dim),
502
+ ],
503
+ }
504
+
505
+ def get_shape_dict(self, batch_size, image_height, image_width):
506
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
507
+ return {
508
+ "sample": (2 * batch_size, self.unet_dim, latent_height, latent_width),
509
+ "encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim),
510
+ "latent": (2 * batch_size, 4, latent_height, latent_width),
511
+ }
512
+
513
+ def get_sample_input(self, batch_size, image_height, image_width):
514
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
515
+ dtype = torch.float16 if self.fp16 else torch.float32
516
+ return (
517
+ torch.randn(
518
+ 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device
519
+ ),
520
+ torch.tensor([1.0], dtype=torch.float32, device=self.device),
521
+ torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),
522
+ )
523
+
524
+
525
+ def make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False):
526
+ return UNet(
527
+ model,
528
+ fp16=True,
529
+ device=device,
530
+ max_batch_size=max_batch_size,
531
+ embedding_dim=embedding_dim,
532
+ unet_dim=(9 if inpaint else 4),
533
+ )
534
+
535
+
536
+ class VAE(BaseModel):
537
+ def __init__(self, model, device, max_batch_size, embedding_dim):
538
+ super(VAE, self).__init__(
539
+ model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
540
+ )
541
+ self.name = "VAE decoder"
542
+
543
+ def get_input_names(self):
544
+ return ["latent"]
545
+
546
+ def get_output_names(self):
547
+ return ["images"]
548
+
549
+ def get_dynamic_axes(self):
550
+ return {"latent": {0: "B", 2: "H", 3: "W"}, "images": {0: "B", 2: "8H", 3: "8W"}}
551
+
552
+ def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
553
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
554
+ (
555
+ min_batch,
556
+ max_batch,
557
+ _,
558
+ _,
559
+ _,
560
+ _,
561
+ min_latent_height,
562
+ max_latent_height,
563
+ min_latent_width,
564
+ max_latent_width,
565
+ ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
566
+ return {
567
+ "latent": [
568
+ (min_batch, 4, min_latent_height, min_latent_width),
569
+ (batch_size, 4, latent_height, latent_width),
570
+ (max_batch, 4, max_latent_height, max_latent_width),
571
+ ]
572
+ }
573
+
574
+ def get_shape_dict(self, batch_size, image_height, image_width):
575
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
576
+ return {
577
+ "latent": (batch_size, 4, latent_height, latent_width),
578
+ "images": (batch_size, 3, image_height, image_width),
579
+ }
580
+
581
+ def get_sample_input(self, batch_size, image_height, image_width):
582
+ latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
583
+ return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device)
584
+
585
+
586
+ def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):
587
+ return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
588
+
589
+
590
+ class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
591
+ r"""
592
+ Pipeline for text-to-image generation using TensorRT accelerated Stable Diffusion.
593
+
594
+ This model inherits from [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods the
595
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
596
+
597
+ Args:
598
+ vae ([`AutoencoderKL`]):
599
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
600
+ text_encoder ([`CLIPTextModel`]):
601
+ Frozen text-encoder. Stable Diffusion uses the text portion of
602
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
603
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
604
+ tokenizer (`CLIPTokenizer`):
605
+ Tokenizer of class
606
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
607
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
608
+ scheduler ([`SchedulerMixin`]):
609
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
610
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
611
+ safety_checker ([`StableDiffusionSafetyChecker`]):
612
+ Classification module that estimates whether generated images could be considered offensive or harmful.
613
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
614
+ feature_extractor ([`CLIPFeatureExtractor`]):
615
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
616
+ """
617
+
618
+ def __init__(
619
+ self,
620
+ vae: AutoencoderKL,
621
+ text_encoder: CLIPTextModel,
622
+ tokenizer: CLIPTokenizer,
623
+ unet: UNet2DConditionModel,
624
+ scheduler: DDIMScheduler,
625
+ safety_checker: StableDiffusionSafetyChecker,
626
+ feature_extractor: CLIPFeatureExtractor,
627
+ requires_safety_checker: bool = True,
628
+ stages=["clip", "unet", "vae"],
629
+ image_height: int = 768,
630
+ image_width: int = 768,
631
+ max_batch_size: int = 16,
632
+ # ONNX export parameters
633
+ onnx_opset: int = 17,
634
+ onnx_dir: str = "onnx",
635
+ # TensorRT engine build parameters
636
+ engine_dir: str = "engine",
637
+ build_preview_features: bool = True,
638
+ force_engine_rebuild: bool = False,
639
+ timing_cache: str = "timing_cache",
640
+ ):
641
+ super().__init__(
642
+ vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker
643
+ )
644
+
645
+ self.vae.forward = self.vae.decode
646
+
647
+ self.stages = stages
648
+ self.image_height, self.image_width = image_height, image_width
649
+ self.inpaint = False
650
+ self.onnx_opset = onnx_opset
651
+ self.onnx_dir = onnx_dir
652
+ self.engine_dir = engine_dir
653
+ self.force_engine_rebuild = force_engine_rebuild
654
+ self.timing_cache = timing_cache
655
+ self.build_static_batch = False
656
+ self.build_dynamic_shape = False
657
+ self.build_preview_features = build_preview_features
658
+
659
+ self.max_batch_size = max_batch_size
660
+ # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.
661
+ if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512:
662
+ self.max_batch_size = 4
663
+
664
+ self.stream = None # loaded in loadResources()
665
+ self.models = {} # loaded in __loadModels()
666
+ self.engine = {} # loaded in build_engines()
667
+
668
+ def __loadModels(self):
669
+ # Load pipeline models
670
+ self.embedding_dim = self.text_encoder.config.hidden_size
671
+ models_args = {
672
+ "device": self.torch_device,
673
+ "max_batch_size": self.max_batch_size,
674
+ "embedding_dim": self.embedding_dim,
675
+ "inpaint": self.inpaint,
676
+ }
677
+ if "clip" in self.stages:
678
+ self.models["clip"] = make_CLIP(self.text_encoder, **models_args)
679
+ if "unet" in self.stages:
680
+ self.models["unet"] = make_UNet(self.unet, **models_args)
681
+ if "vae" in self.stages:
682
+ self.models["vae"] = make_VAE(self.vae, **models_args)
683
+
684
+ @classmethod
685
+ def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
686
+ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
687
+ resume_download = kwargs.pop("resume_download", False)
688
+ proxies = kwargs.pop("proxies", None)
689
+ local_files_only = kwargs.pop("local_files_only", False)
690
+ use_auth_token = kwargs.pop("use_auth_token", None)
691
+ revision = kwargs.pop("revision", None)
692
+
693
+ cls.cached_folder = (
694
+ pretrained_model_name_or_path
695
+ if os.path.isdir(pretrained_model_name_or_path)
696
+ else snapshot_download(
697
+ pretrained_model_name_or_path,
698
+ cache_dir=cache_dir,
699
+ resume_download=resume_download,
700
+ proxies=proxies,
701
+ local_files_only=local_files_only,
702
+ use_auth_token=use_auth_token,
703
+ revision=revision,
704
+ )
705
+ )
706
+
707
+ def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False):
708
+ super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings)
709
+
710
+ self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir)
711
+ self.engine_dir = os.path.join(self.cached_folder, self.engine_dir)
712
+ self.timing_cache = os.path.join(self.cached_folder, self.timing_cache)
713
+
714
+ # set device
715
+ self.torch_device = self._execution_device
716
+ logger.warning(f"Running inference on device: {self.torch_device}")
717
+
718
+ # load models
719
+ self.__loadModels()
720
+
721
+ # build engines
722
+ self.engine = build_engines(
723
+ self.models,
724
+ self.engine_dir,
725
+ self.onnx_dir,
726
+ self.onnx_opset,
727
+ opt_image_height=self.image_height,
728
+ opt_image_width=self.image_width,
729
+ force_engine_rebuild=self.force_engine_rebuild,
730
+ static_batch=self.build_static_batch,
731
+ static_shape=not self.build_dynamic_shape,
732
+ enable_preview=self.build_preview_features,
733
+ timing_cache=self.timing_cache,
734
+ )
735
+
736
+ return self
737
+
738
+ def __encode_prompt(self, prompt, negative_prompt):
739
+ r"""
740
+ Encodes the prompt into text encoder hidden states.
741
+
742
+ Args:
743
+ prompt (`str` or `List[str]`, *optional*):
744
+ prompt to be encoded
745
+ negative_prompt (`str` or `List[str]`, *optional*):
746
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
747
+ `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
748
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
749
+ """
750
+ # Tokenize prompt
751
+ text_input_ids = (
752
+ self.tokenizer(
753
+ prompt,
754
+ padding="max_length",
755
+ max_length=self.tokenizer.model_max_length,
756
+ truncation=True,
757
+ return_tensors="pt",
758
+ )
759
+ .input_ids.type(torch.int32)
760
+ .to(self.torch_device)
761
+ )
762
+
763
+ text_input_ids_inp = device_view(text_input_ids)
764
+ # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt
765
+ text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids_inp}, self.stream)[
766
+ "text_embeddings"
767
+ ].clone()
768
+
769
+ # Tokenize negative prompt
770
+ uncond_input_ids = (
771
+ self.tokenizer(
772
+ negative_prompt,
773
+ padding="max_length",
774
+ max_length=self.tokenizer.model_max_length,
775
+ truncation=True,
776
+ return_tensors="pt",
777
+ )
778
+ .input_ids.type(torch.int32)
779
+ .to(self.torch_device)
780
+ )
781
+ uncond_input_ids_inp = device_view(uncond_input_ids)
782
+ uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[
783
+ "text_embeddings"
784
+ ]
785
+
786
+ # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance
787
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16)
788
+
789
+ return text_embeddings
790
+
791
+ def __denoise_latent(
792
+ self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None
793
+ ):
794
+ if not isinstance(timesteps, torch.Tensor):
795
+ timesteps = self.scheduler.timesteps
796
+ for step_index, timestep in enumerate(timesteps):
797
+ # Expand the latents, since we are doing classifier-free guidance
798
+ latent_model_input = torch.cat([latents] * 2)
799
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)
800
+ if isinstance(mask, torch.Tensor):
801
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
802
+
803
+ # Predict the noise residual
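+ # Engine inputs are handed over as device views onto the CUDA tensors; the timestep itself must be float32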
804
+ timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep
805
+
806
+ sample_inp = device_view(latent_model_input)
807
+ timestep_inp = device_view(timestep_float)
808
+ embeddings_inp = device_view(text_embeddings)
809
+ noise_pred = runEngine(
810
+ self.engine["unet"],
811
+ {"sample": sample_inp, "timestep": timestep_inp, "encoder_hidden_states": embeddings_inp},
812
+ self.stream,
813
+ )["latent"]
814
+
815
+ # Perform guidance
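+ # Classifier-free guidance: combine the two halves of the doubled batch as uncond + guidance_scale * (text - uncond)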
816
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
817
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
818
+
819
+ latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample
820
+
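+ # Rescale by the inverse of the Stable Diffusion VAE scaling factor (0.18215) before decoding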
821
+ latents = 1.0 / 0.18215 * latents
822
+ return latents
823
+
824
+ def __decode_latent(self, latents):
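+ # Decode with the TensorRT VAE engine, map images from [-1, 1] to [0, 1], and return NHWC float32 numpy arrays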
825
+ images = runEngine(self.engine["vae"], {"latent": device_view(latents)}, self.stream)["images"]
826
+ images = (images / 2 + 0.5).clamp(0, 1)
827
+ return images.cpu().permute(0, 2, 3, 1).float().numpy()
828
+
829
+ def __loadResources(self, image_height, image_width, batch_size):
830
+ self.stream = cuda.Stream()
831
+
832
+ # Allocate buffers for TensorRT engine bindings
833
+ for model_name, obj in self.models.items():
834
+ self.engine[model_name].allocate_buffers(
835
+ shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device
836
+ )
837
+
838
+ @torch.no_grad()
839
+ def __call__(
840
+ self,
841
+ prompt: Union[str, List[str]] = None,
842
+ num_inference_steps: int = 50,
843
+ guidance_scale: float = 7.5,
844
+ negative_prompt: Optional[Union[str, List[str]]] = None,
845
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
846
+ ):
847
+ r"""
848
+ Function invoked when calling the pipeline for generation.
849
+
850
+ Args:
851
+ prompt (`str` or `List[str]`, *optional*):
852
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
853
+ instead.
854
+ num_inference_steps (`int`, *optional*, defaults to 50):
855
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
856
+ expense of slower inference.
857
+ guidance_scale (`float`, *optional*, defaults to 7.5):
858
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
859
+ `guidance_scale` is defined as `w` of equation 2 of [Imagen
860
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
861
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
862
+ usually at the expense of lower image quality.
863
+ negative_prompt (`str` or `List[str]`, *optional*):
864
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
865
+ `negative_prompt_embeds` instead.
866
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
867
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
868
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
869
+ to make generation deterministic.
870
+
871
+ """
872
+ self.generator = generator
873
+ self.denoising_steps = num_inference_steps
874
+ self.guidance_scale = guidance_scale
875
+
876
+ # Set up the scheduler timesteps for the requested number of denoising steps
877
+ self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device)
878
+
879
+ # Define call parameters
880
+ if prompt is not None and isinstance(prompt, str):
881
+ batch_size = 1
882
+ prompt = [prompt]
883
+ elif prompt is not None and isinstance(prompt, list):
884
+ batch_size = len(prompt)
885
+ else:
886
+ raise ValueError(f"Expected prompt to be of type list or str but got {type(prompt)}")
887
+
888
+ if negative_prompt is None:
889
+ negative_prompt = [""] * batch_size
890
+
891
+ if negative_prompt is not None and isinstance(negative_prompt, str):
892
+ negative_prompt = [negative_prompt]
893
+
894
+ assert len(prompt) == len(negative_prompt)
895
+
896
+ if batch_size > self.max_batch_size:
897
+ raise ValueError(
898
+ f"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. If dynamic shape is used, then maximum batch size is 4"
899
+ )
900
+
901
+ # load resources
902
+ self.__loadResources(self.image_height, self.image_width, batch_size)
903
+
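+ # The whole generation runs under inference mode, CUDA autocast, and an active TensorRT runtime context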
904
+ with torch.inference_mode(), torch.autocast("cuda"), trt.Runtime(TRT_LOGGER):
905
+ # CLIP text encoder
906
+ text_embeddings = self.__encode_prompt(prompt, negative_prompt)
907
+
908
+ # Pre-initialize latents
909
+ num_channels_latents = self.unet.in_channels
910
+ latents = self.prepare_latents(
911
+ batch_size,
912
+ num_channels_latents,
913
+ self.image_height,
914
+ self.image_width,
915
+ torch.float32,
916
+ self.torch_device,
917
+ generator,
918
+ )
919
+
920
+ # UNet denoiser
921
+ latents = self.__denoise_latent(latents, text_embeddings)
922
+
923
+ # VAE decode latent
924
+ images = self.__decode_latent(latents)
925
+
926
+ images, has_nsfw_concept = self.run_safety_checker(images, self.torch_device, text_embeddings.dtype)
927
+ images = self.numpy_to_pil(images)
928
+ return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
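For readers who want to try the file above, here is a minimal usage sketch for loading it as a community pipeline. It only relies on methods visible in this diff (`set_cached_folder`, `to`, and `__call__`); the base checkpoint id, the DDIM scheduler choice, the prompt, and the output filename are illustrative assumptions rather than part of the commit.

```python
# Hypothetical usage sketch for the TensorRT text-to-image community pipeline shown above.
# The model id, scheduler, prompt, and filenames are assumptions for illustration only.
import torch
from diffusers import DDIMScheduler, DiffusionPipeline

model_id = "stabilityai/stable-diffusion-2-1"  # assumed base checkpoint

# A DDIM scheduler is assumed here; swap in whatever scheduler suits the checkpoint.
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")

pipe = DiffusionPipeline.from_pretrained(
    model_id,
    custom_pipeline="stable_diffusion_tensorrt_txt2img",  # the community file in this commit
    scheduler=scheduler,
    torch_dtype=torch.float16,
)

# Point the pipeline at a cached snapshot; ONNX exports and TensorRT engines are stored alongside it.
pipe.set_cached_folder(model_id)

# Moving to CUDA triggers the ONNX export and engine build (slow on the first run, cached afterwards).
pipe = pipe.to("cuda")

image = pipe("a photograph of Mount Fuji during cherry blossom season").images[0]
image.save("tensorrt_txt2img.png")
```

The first call to `to("cuda")` is expected to be the slow step, since that is where the ONNX export and TensorRT engine build from the `to` method above take place; later runs reuse the engines cached next to the downloaded snapshot.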