2MaxM committed on
Commit
c3a55c4
1 Parent(s): b26070f
Files changed (1)
  1. zero123plus/pipeline.Py +0 -406
zero123plus/pipeline.Py DELETED
@@ -1,406 +0,0 @@
-from typing import Any, Dict, Optional
-from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.schedulers import KarrasDiffusionSchedulers
-
-import numpy
-import torch
-import torch.nn as nn
-import torch.utils.checkpoint
-import torch.distributed
-import transformers
-from collections import OrderedDict
-from PIL import Image
-from torchvision import transforms
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
-
-import diffusers
-from diffusers import (
-    AutoencoderKL,
-    DDPMScheduler,
-    DiffusionPipeline,
-    EulerAncestralDiscreteScheduler,
-    UNet2DConditionModel,
-    ImagePipelineOutput
-)
-from diffusers.image_processor import VaeImageProcessor
-from diffusers.models.attention_processor import Attention, AttnProcessor, XFormersAttnProcessor, AttnProcessor2_0
-from diffusers.utils.import_utils import is_xformers_available
-
-
-def to_rgb_image(maybe_rgba: Image.Image):
-    if maybe_rgba.mode == 'RGB':
-        return maybe_rgba
-    elif maybe_rgba.mode == 'RGBA':
-        rgba = maybe_rgba
-        img = numpy.random.randint(255, 256, size=[rgba.size[1], rgba.size[0], 3], dtype=numpy.uint8)
-        img = Image.fromarray(img, 'RGB')
-        img.paste(rgba, mask=rgba.getchannel('A'))
-        return img
-    else:
-        raise ValueError("Unsupported image type.", maybe_rgba.mode)
-
-
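-# ReferenceOnlyAttnProc wraps a stock attention processor. In "w" (write) mode it caches the hidden
-# states of the reference branch into ref_dict; in "r"/"m" (read) modes it concatenates the cached
-# states onto the current encoder_hidden_states so the denoising pass attends to the reference image.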
-class ReferenceOnlyAttnProc(torch.nn.Module):
-    def __init__(
-        self,
-        chained_proc,
-        enabled=False,
-        name=None
-    ) -> None:
-        super().__init__()
-        self.enabled = enabled
-        self.chained_proc = chained_proc
-        self.name = name
-
-    def __call__(
-        self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None,
-        mode="w", ref_dict: dict = None, is_cfg_guidance = False
-    ) -> Any:
-        if encoder_hidden_states is None:
-            encoder_hidden_states = hidden_states
-        if self.enabled and is_cfg_guidance:
-            res0 = self.chained_proc(attn, hidden_states[:1], encoder_hidden_states[:1], attention_mask)
-            hidden_states = hidden_states[1:]
-            encoder_hidden_states = encoder_hidden_states[1:]
-        if self.enabled:
-            if mode == 'w':
-                ref_dict[self.name] = encoder_hidden_states
-            elif mode == 'r':
-                encoder_hidden_states = torch.cat([encoder_hidden_states, ref_dict.pop(self.name)], dim=1)
-            elif mode == 'm':
-                encoder_hidden_states = torch.cat([encoder_hidden_states, ref_dict[self.name]], dim=1)
-            else:
-                assert False, mode
-        res = self.chained_proc(attn, hidden_states, encoder_hidden_states, attention_mask)
-        if self.enabled and is_cfg_guidance:
-            res = torch.cat([res0, res])
-        return res
-
-
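-# RefOnlyNoisedUNet first pushes a noised copy of the condition latent through the UNet in "w" mode
-# to fill ref_dict, then runs the actual sample in "r" mode so each self-attention layer also sees
-# the reference features; the unconditional row is dropped for the reference pass when CFG is active.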
-class RefOnlyNoisedUNet(torch.nn.Module):
-    def __init__(self, unet: UNet2DConditionModel, train_sched: DDPMScheduler, val_sched: EulerAncestralDiscreteScheduler) -> None:
-        super().__init__()
-        self.unet = unet
-        self.train_sched = train_sched
-        self.val_sched = val_sched
-
-        unet_lora_attn_procs = dict()
-        for name, _ in unet.attn_processors.items():
-            if torch.__version__ >= '2.0':
-                default_attn_proc = AttnProcessor2_0()
-            elif is_xformers_available():
-                default_attn_proc = XFormersAttnProcessor()
-            else:
-                default_attn_proc = AttnProcessor()
-            unet_lora_attn_procs[name] = ReferenceOnlyAttnProc(
-                default_attn_proc, enabled=name.endswith("attn1.processor"), name=name
-            )
-        unet.set_attn_processor(unet_lora_attn_procs)
-
-    def __getattr__(self, name: str):
-        try:
-            return super().__getattr__(name)
-        except AttributeError:
-            return getattr(self.unet, name)
-
-    def forward_cond(self, noisy_cond_lat, timestep, encoder_hidden_states, class_labels, ref_dict, is_cfg_guidance, **kwargs):
-        if is_cfg_guidance:
-            encoder_hidden_states = encoder_hidden_states[1:]
-            class_labels = class_labels[1:]
-        self.unet(
-            noisy_cond_lat, timestep,
-            encoder_hidden_states=encoder_hidden_states,
-            class_labels=class_labels,
-            cross_attention_kwargs=dict(mode="w", ref_dict=ref_dict),
-            **kwargs
-        )
-
-    def forward(
-        self, sample, timestep, encoder_hidden_states, class_labels=None,
-        *args, cross_attention_kwargs,
-        down_block_res_samples=None, mid_block_res_sample=None,
-        **kwargs
-    ):
-        cond_lat = cross_attention_kwargs['cond_lat']
-        is_cfg_guidance = cross_attention_kwargs.get('is_cfg_guidance', False)
-        noise = torch.randn_like(cond_lat)
-        if self.training:
-            noisy_cond_lat = self.train_sched.add_noise(cond_lat, noise, timestep)
-            noisy_cond_lat = self.train_sched.scale_model_input(noisy_cond_lat, timestep)
-        else:
-            noisy_cond_lat = self.val_sched.add_noise(cond_lat, noise, timestep.reshape(-1))
-            noisy_cond_lat = self.val_sched.scale_model_input(noisy_cond_lat, timestep.reshape(-1))
-        ref_dict = {}
-        self.forward_cond(
-            noisy_cond_lat, timestep,
-            encoder_hidden_states, class_labels,
-            ref_dict, is_cfg_guidance, **kwargs
-        )
-        weight_dtype = self.unet.dtype
-        return self.unet(
-            sample, timestep,
-            encoder_hidden_states, *args,
-            class_labels=class_labels,
-            cross_attention_kwargs=dict(mode="r", ref_dict=ref_dict, is_cfg_guidance=is_cfg_guidance),
-            down_block_additional_residuals=[
-                sample.to(dtype=weight_dtype) for sample in down_block_res_samples
-            ] if down_block_res_samples is not None else None,
-            mid_block_additional_residual=(
-                mid_block_res_sample.to(dtype=weight_dtype)
-                if mid_block_res_sample is not None else None
-            ),
-            **kwargs
-        )
-
-
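-# The UNet works in a shifted, rescaled latent/image range; these helpers convert between the VAE's
-# native scale and the scale the model expects.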
-def scale_latents(latents):
-    latents = (latents - 0.22) * 0.75
-    return latents
-
-
-def unscale_latents(latents):
-    latents = latents / 0.75 + 0.22
-    return latents
-
-
-def scale_image(image):
-    image = image * 0.5 / 0.8
-    return image
-
-
-def unscale_image(image):
-    image = image / 0.5 * 0.8
-    return image
-
-
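-# DepthControlUNet adds an optional ControlNet on top of the reference-only UNet: the depth map passed
-# via cross_attention_kwargs['control_depth'] is turned into down/mid-block residuals that are fed
-# into the wrapped UNet.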
-class DepthControlUNet(torch.nn.Module):
-    def __init__(self, unet: RefOnlyNoisedUNet, controlnet: Optional[diffusers.ControlNetModel] = None, conditioning_scale=1.0) -> None:
-        super().__init__()
-        self.unet = unet
-        if controlnet is None:
-            self.controlnet = diffusers.ControlNetModel.from_unet(unet.unet)
-        else:
-            self.controlnet = controlnet
-        DefaultAttnProc = AttnProcessor2_0
-        if is_xformers_available():
-            DefaultAttnProc = XFormersAttnProcessor
-        self.controlnet.set_attn_processor(DefaultAttnProc())
-        self.conditioning_scale = conditioning_scale
-
-    def __getattr__(self, name: str):
-        try:
-            return super().__getattr__(name)
-        except AttributeError:
-            return getattr(self.unet, name)
-
-    def forward(self, sample, timestep, encoder_hidden_states, class_labels=None, *args, cross_attention_kwargs: dict, **kwargs):
-        cross_attention_kwargs = dict(cross_attention_kwargs)
-        control_depth = cross_attention_kwargs.pop('control_depth')
-        down_block_res_samples, mid_block_res_sample = self.controlnet(
-            sample,
-            timestep,
-            encoder_hidden_states=encoder_hidden_states,
-            controlnet_cond=control_depth,
-            conditioning_scale=self.conditioning_scale,
-            return_dict=False,
-        )
-        return self.unet(
-            sample,
-            timestep,
-            encoder_hidden_states=encoder_hidden_states,
-            down_block_res_samples=down_block_res_samples,
-            mid_block_res_sample=mid_block_res_sample,
-            cross_attention_kwargs=cross_attention_kwargs
-        )
-
-
-class ModuleListDict(torch.nn.Module):
-    def __init__(self, procs: dict) -> None:
-        super().__init__()
-        self.keys = sorted(procs.keys())
-        self.values = torch.nn.ModuleList(procs[k] for k in self.keys)
-
-    def __getitem__(self, key):
-        return self.values[self.keys.index(key)]
-
-
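-# SuperNet wraps a dict of sub-modules (here the ControlNet returned by add_controlnet) and registers
-# state_dict hooks that translate between "layers.N.*" keys and the original module names, so the
-# weights save and load under diffusers-style names.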
-class SuperNet(torch.nn.Module):
-    def __init__(self, state_dict: Dict[str, torch.Tensor]):
-        super().__init__()
-        state_dict = OrderedDict((k, state_dict[k]) for k in sorted(state_dict.keys()))
-        self.layers = torch.nn.ModuleList(state_dict.values())
-        self.mapping = dict(enumerate(state_dict.keys()))
-        self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}
-
-        # .processor for unet, .self_attn for text encoder
-        self.split_keys = [".processor", ".self_attn"]
-
-        # we add a hook to state_dict() and load_state_dict() so that the
-        # naming fits with `unet.attn_processors`
-        def map_to(module, state_dict, *args, **kwargs):
-            new_state_dict = {}
-            for key, value in state_dict.items():
-                num = int(key.split(".")[1]) # 0 is always "layers"
-                new_key = key.replace(f"layers.{num}", module.mapping[num])
-                new_state_dict[new_key] = value
-
-            return new_state_dict
-
-        def remap_key(key, state_dict):
-            for k in self.split_keys:
-                if k in key:
-                    return key.split(k)[0] + k
-            return key.split('.')[0]
-
-        def map_from(module, state_dict, *args, **kwargs):
-            all_keys = list(state_dict.keys())
-            for key in all_keys:
-                replace_key = remap_key(key, state_dict)
-                new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
-                state_dict[new_key] = state_dict[key]
-                del state_dict[key]
-
-        self._register_state_dict_hook(map_to)
-        self._register_load_state_dict_pre_hook(map_from, with_module=True)
-
-
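-# Zero123PlusPipeline: the input view is encoded twice, by the VAE as the reference latent and by the
-# CLIP vision encoder as global embeddings that are blended into the prompt embeddings through the
-# ramping coefficients, before the parent StableDiffusionPipeline generates the multi-view output
-# (640x960 by default).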
-class Zero123PlusPipeline(diffusers.StableDiffusionPipeline):
-    tokenizer: transformers.CLIPTokenizer
-    text_encoder: transformers.CLIPTextModel
-    vision_encoder: transformers.CLIPVisionModelWithProjection
-
-    feature_extractor_clip: transformers.CLIPImageProcessor
-    unet: UNet2DConditionModel
-    scheduler: diffusers.schedulers.KarrasDiffusionSchedulers
-
-    vae: AutoencoderKL
-    ramping: nn.Linear
-
-    feature_extractor_vae: transformers.CLIPImageProcessor
-
-    depth_transforms_multi = transforms.Compose([
-        transforms.ToTensor(),
-        transforms.Normalize([0.5], [0.5])
-    ])
-
-    def __init__(
-        self,
-        vae: AutoencoderKL,
-        text_encoder: CLIPTextModel,
-        tokenizer: CLIPTokenizer,
-        unet: UNet2DConditionModel,
-        scheduler: KarrasDiffusionSchedulers,
-        vision_encoder: transformers.CLIPVisionModelWithProjection,
-        feature_extractor_clip: CLIPImageProcessor,
-        feature_extractor_vae: CLIPImageProcessor,
-        ramping_coefficients: Optional[list] = None,
-        safety_checker=None,
-    ):
-        DiffusionPipeline.__init__(self)
-
-        self.register_modules(
-            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer,
-            unet=unet, scheduler=scheduler, safety_checker=None,
-            vision_encoder=vision_encoder,
-            feature_extractor_clip=feature_extractor_clip,
-            feature_extractor_vae=feature_extractor_vae
-        )
-        self.register_to_config(ramping_coefficients=ramping_coefficients)
-        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
-
-    def prepare(self):
-        train_sched = DDPMScheduler.from_config(self.scheduler.config)
-        if isinstance(self.unet, UNet2DConditionModel):
-            self.unet = RefOnlyNoisedUNet(self.unet, train_sched, self.scheduler).eval()
-
-    def add_controlnet(self, controlnet: Optional[diffusers.ControlNetModel] = None, conditioning_scale=1.0):
-        self.prepare()
-        self.unet = DepthControlUNet(self.unet, controlnet, conditioning_scale)
-        return SuperNet(OrderedDict([('controlnet', self.unet.controlnet)]))
-
-    def encode_condition_image(self, image: torch.Tensor):
-        image = self.vae.encode(image).latent_dist.sample()
-        return image
-
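-    # __call__ flow: preprocess the image for the VAE and CLIP branches, build the reference latent
-    # (plus a zero-image negative when guidance_scale > 1), mix CLIP image embeddings into the prompt
-    # embeddings, run the parent pipeline in latent mode, then unscale and decode the result.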
-    @torch.no_grad()
-    def __call__(
-        self,
-        image: Image.Image = None,
-        prompt = "",
-        *args,
-        num_images_per_prompt: Optional[int] = 1,
-        guidance_scale=4.0,
-        depth_image: Image.Image = None,
-        output_type: Optional[str] = "pil",
-        width=640,
-        height=960,
-        num_inference_steps=28,
-        return_dict=True,
-        **kwargs
-    ):
-        self.prepare()
-        if image is None:
-            raise ValueError("Inputting embeddings not supported for this pipeline. Please pass an image.")
-        assert not isinstance(image, torch.Tensor)
-        image = to_rgb_image(image)
-        image_1 = self.feature_extractor_vae(images=image, return_tensors="pt").pixel_values
-        image_2 = self.feature_extractor_clip(images=image, return_tensors="pt").pixel_values
-        if depth_image is not None and hasattr(self.unet, "controlnet"):
-            depth_image = to_rgb_image(depth_image)
-            depth_image = self.depth_transforms_multi(depth_image).to(
-                device=self.unet.controlnet.device, dtype=self.unet.controlnet.dtype
-            )
-        image = image_1.to(device=self.vae.device, dtype=self.vae.dtype)
-        image_2 = image_2.to(device=self.vae.device, dtype=self.vae.dtype)
-        cond_lat = self.encode_condition_image(image)
-        if guidance_scale > 1:
-            negative_lat = self.encode_condition_image(torch.zeros_like(image))
-            cond_lat = torch.cat([negative_lat, cond_lat])
-        encoded = self.vision_encoder(image_2, output_hidden_states=False)
-        global_embeds = encoded.image_embeds
-        global_embeds = global_embeds.unsqueeze(-2)
-
-        if hasattr(self, "encode_prompt"):
-            encoder_hidden_states = self.encode_prompt(
-                prompt,
-                self.device,
-                num_images_per_prompt,
-                False
-            )[0]
-        else:
-            encoder_hidden_states = self._encode_prompt(
-                prompt,
-                self.device,
-                num_images_per_prompt,
-                False
-            )
-        ramp = global_embeds.new_tensor(self.config.ramping_coefficients).unsqueeze(-1)
-        encoder_hidden_states = encoder_hidden_states + global_embeds * ramp
-        cak = dict(cond_lat=cond_lat)
-        if hasattr(self.unet, "controlnet"):
-            cak['control_depth'] = depth_image
-        latents: torch.Tensor = super().__call__(
-            None,
-            *args,
-            cross_attention_kwargs=cak,
-            guidance_scale=guidance_scale,
-            num_images_per_prompt=num_images_per_prompt,
-            prompt_embeds=encoder_hidden_states,
-            num_inference_steps=num_inference_steps,
-            output_type='latent',
-            width=width,
-            height=height,
-            **kwargs
-        ).images
-        latents = unscale_latents(latents)
-        if not output_type == "latent":
-            image = unscale_image(self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0])
-        else:
-            image = latents
-
-        image = self.image_processor.postprocess(image, output_type=output_type)
-        if not return_dict:
-            return (image,)
-
-        return ImagePipelineOutput(images=image)
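
For context, this pipeline file is normally not imported directly but loaded through diffusers' custom-pipeline mechanism. A minimal usage sketch, assuming illustrative repository ids (substitute the repository this commit actually belongs to):

import torch
from PIL import Image
from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler

# Repo ids below are assumptions for illustration; point them at the repo hosting pipeline.py.
pipeline = DiffusionPipeline.from_pretrained(
    "sudo-ai/zero123plus-v1.1",                      # assumed base model repo
    custom_pipeline="sudo-ai/zero123plus-pipeline",  # assumed repo providing this pipeline class
    torch_dtype=torch.float16,
)
# Zero123++ is commonly sampled with Euler-ancestral and trailing timestep spacing.
pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(
    pipeline.scheduler.config, timestep_spacing="trailing"
)
pipeline.to("cuda")

cond = Image.open("input.png")   # a single RGB or RGBA reference view
result = pipeline(cond, num_inference_steps=28).images[0]
result.save("multiview.png")     # 640x960 sheet of generated views (per the defaults above)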