Spaces:
Paused
Paused
nekoshadow
commited on
Commit
•
4f2a492
1
Parent(s):
0f417f9
Add controlnet
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- app.py +105 -78
- controlnet_ckpt/config.json +32 -0
- controlnet_sync.py +368 -0
- diffusers/__init__.py +758 -0
- diffusers/commands/__init__.py +27 -0
- diffusers/commands/diffusers_cli.py +43 -0
- diffusers/commands/env.py +84 -0
- diffusers/commands/fp16_safetensors.py +132 -0
- diffusers/configuration_utils.py +699 -0
- diffusers/dependency_versions_check.py +34 -0
- diffusers/dependency_versions_table.py +46 -0
- diffusers/experimental/README.md +5 -0
- diffusers/experimental/__init__.py +1 -0
- diffusers/experimental/rl/__init__.py +1 -0
- diffusers/experimental/rl/value_guided_sampling.py +153 -0
- diffusers/image_processor.py +884 -0
- diffusers/loaders/__init__.py +86 -0
- diffusers/loaders/ip_adapter.py +190 -0
- diffusers/loaders/lora.py +1553 -0
- diffusers/loaders/lora_conversion_utils.py +284 -0
- diffusers/loaders/peft.py +188 -0
- diffusers/loaders/single_file.py +637 -0
- diffusers/loaders/textual_inversion.py +455 -0
- diffusers/loaders/unet.py +828 -0
- diffusers/loaders/utils.py +59 -0
- diffusers/models/README.md +3 -0
- diffusers/models/__init__.py +94 -0
- diffusers/models/activations.py +123 -0
- diffusers/models/adapter.py +584 -0
- diffusers/models/attention.py +668 -0
- diffusers/models/attention_flax.py +494 -0
- diffusers/models/attention_processor.py +0 -0
- diffusers/models/autoencoders/__init__.py +5 -0
- diffusers/models/autoencoders/autoencoder_asym_kl.py +186 -0
- diffusers/models/autoencoders/autoencoder_kl.py +487 -0
- diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +400 -0
- diffusers/models/autoencoders/autoencoder_tiny.py +345 -0
- diffusers/models/autoencoders/consistency_decoder_vae.py +435 -0
- diffusers/models/autoencoders/vae.py +983 -0
- diffusers/models/controlnet.py +862 -0
- diffusers/models/controlnet_flax.py +395 -0
- diffusers/models/downsampling.py +338 -0
- diffusers/models/dual_transformer_2d.py +155 -0
- diffusers/models/embeddings.py +880 -0
- diffusers/models/embeddings_flax.py +97 -0
- diffusers/models/lora.py +434 -0
- diffusers/models/modeling_flax_pytorch_utils.py +134 -0
- diffusers/models/modeling_flax_utils.py +566 -0
- diffusers/models/modeling_outputs.py +17 -0
- diffusers/models/modeling_pytorch_flax_utils.py +161 -0
app.py
CHANGED
@@ -12,25 +12,22 @@ from ldm.models.diffusion.sync_dreamer import SyncDDIMSampler, SyncMultiviewDiff
|
|
12 |
from ldm.util import add_margin, instantiate_from_config
|
13 |
from sam_utils import sam_init, sam_out_nosave
|
14 |
|
15 |
-
import
|
16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
_DESCRIPTION = '''
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
<a style="display:inline-block; margin-left: .5em" href='https://github.com/liuyuan-pal/SyncDreamer'><img src='https://img.shields.io/github/stars/liuyuan-pal/SyncDreamer?style=social' /></a>
|
22 |
-
</div>
|
23 |
-
Given a single-view image, SyncDreamer is able to generate multiview-consistent images, which enables direct 3D reconstruction with NeuS or NeRF without SDS loss </br>
|
24 |
-
|
25 |
-
Procedure: </br>
|
26 |
-
**Step 1**. Upload an image or select an example. ==> The foreground is masked out by SAM and we crop it as inputs. </br>
|
27 |
-
**Step 2**. Select "Elevation angle "and click "Run generation". ==> Generate multiview images. The **Elevation angle** is the elevation of the input image. (This costs about 30s.) </br>
|
28 |
-
You may adjust the **Crop size** and **Elevation angle** to get a better result! <br>
|
29 |
-
To reconstruct a NeRF or a 3D mesh from the generated images, please refer to our [github repository](https://github.com/liuyuan-pal/SyncDreamer). <br>
|
30 |
-
We have heavily borrowed codes from [One-2-3-45](https://huggingface.co/spaces/One-2-3-45/One-2-3-45), which is also an amazing single-view reconstruction method.
|
31 |
'''
|
32 |
_USER_GUIDE0 = "Step1: Please upload an image in the block above (or choose an example shown in the left)."
|
33 |
-
# _USER_GUIDE1 = "Step1: Please select a **Crop size** and click **Crop it**."
|
34 |
_USER_GUIDE2 = "Step2: Please choose a **Elevation angle** and click **Run Generate**. The **Elevation angle** is the elevation of the input image. This costs about 30s."
|
35 |
_USER_GUIDE3 = "Generated multiview images are shown below! (You may adjust the **Crop size** and **Elevation angle** to get a better result!)"
|
36 |
|
@@ -79,48 +76,51 @@ def resize_inputs(image_input, crop_size):
|
|
79 |
results = add_margin(ref_img_, size=256)
|
80 |
return results
|
81 |
|
82 |
-
def generate(model, sample_steps, batch_view_num, sample_num, cfg_scale, seed, image_input, elevation_input):
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
|
|
|
|
|
|
124 |
|
125 |
def sam_predict(predictor, removal, raw_im):
|
126 |
if raw_im is None: return None
|
@@ -152,26 +152,51 @@ def sam_predict(predictor, removal, raw_im):
|
|
152 |
else:
|
153 |
return raw_im
|
154 |
|
155 |
-
def
|
156 |
-
# device = f"cuda:0" if torch.cuda.is_available() else "cpu"
|
157 |
-
# models = None # init_model(device, os.path.join(code_dir, ckpt))
|
158 |
-
cfg = 'configs/syncdreamer.yaml'
|
159 |
-
ckpt = 'ckpt/syncdreamer-pretrain.ckpt'
|
160 |
config = OmegaConf.load(cfg)
|
161 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
162 |
if deployed:
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
169 |
mask_predictor = sam_init()
|
170 |
removal = BackgroundRemoval()
|
171 |
else:
|
172 |
-
model = None
|
173 |
-
mask_predictor = None
|
174 |
-
removal = None
|
|
|
|
|
|
|
175 |
|
176 |
# NOTE: Examples must match inputs
|
177 |
examples_full = [
|
@@ -186,9 +211,11 @@ def run_demo():
|
|
186 |
]
|
187 |
|
188 |
image_block = gr.Image(type='pil', image_mode='RGBA', height=256, label='Input image', tool=None, interactive=True)
|
189 |
-
|
190 |
crop_size = gr.Slider(120, 240, 200, step=10, label='Crop size', interactive=True)
|
191 |
|
|
|
|
|
192 |
# Compose demo layout & data flow.
|
193 |
with gr.Blocks(title=_TITLE, css="hf_demo/style.css") as demo:
|
194 |
with gr.Row():
|
@@ -202,8 +229,8 @@ def run_demo():
|
|
202 |
with gr.Column(scale=1.2):
|
203 |
gr.Examples(
|
204 |
examples=examples_full, # NOTE: elements must match inputs list!
|
205 |
-
inputs=[image_block,
|
206 |
-
outputs=[image_block,
|
207 |
cache_examples=False,
|
208 |
label='Examples (click one of the images below to start)',
|
209 |
examples_per_page=5,
|
@@ -223,7 +250,7 @@ def run_demo():
|
|
223 |
|
224 |
with gr.Column(scale=0.8):
|
225 |
input_block = gr.Image(type='pil', image_mode='RGBA', label="Input to SyncDreamer", height=256, interactive=False)
|
226 |
-
|
227 |
with gr.Accordion('Advanced options', open=False):
|
228 |
cfg_scale = gr.Slider(1.0, 5.0, 2.0, step=0.1, label='Classifier free guidance', interactive=True)
|
229 |
sample_num = gr.Slider(1, 2, 1, step=1, label='Sample num', interactive=False, info='How many instance (16 images per instance)')
|
@@ -252,7 +279,7 @@ def run_demo():
|
|
252 |
# crop_btn.click(fn=resize_inputs, inputs=[sam_block, crop_size], outputs=[input_block], queue=False)\
|
253 |
# .success(fn=partial(update_guide, _USER_GUIDE2), outputs=[guide_text], queue=False)
|
254 |
|
255 |
-
run_btn.click(partial(generate,
|
256 |
.success(fn=partial(update_guide, _USER_GUIDE3), outputs=[guide_text], queue=False)
|
257 |
|
258 |
demo.queue().launch(share=False, max_threads=80) # auth=("admin", os.environ['PASSWD'])
|
|
|
12 |
from ldm.util import add_margin, instantiate_from_config
|
13 |
from sam_utils import sam_init, sam_out_nosave
|
14 |
|
15 |
+
from ldm.util import instantiate_from_config, prepare_inputs
|
16 |
+
import argparse
|
17 |
+
import cv2
|
18 |
+
from transformers import pipeline
|
19 |
+
from diffusers.utils import load_image, make_image_grid
|
20 |
+
from diffusers import UniPCMultistepScheduler
|
21 |
+
from pipeline_controlnet_sync import StableDiffusionControlNetPipeline
|
22 |
+
from controlnet_sync import ControlNetModelSync
|
23 |
+
|
24 |
+
_TITLE = '''ControlNet + SyncDreamer'''
|
25 |
_DESCRIPTION = '''
|
26 |
+
Given a single-view image and select a target azimuth, ControlNet + SyncDreamer is able to generate the target view
|
27 |
+
|
28 |
+
This HF app is modified from [SyncDreamer HF app](https://huggingface.co/spaces/liuyuan-pal/SyncDreamer). The difference is that I added ControlNet on top of SyncDreamer.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
'''
|
30 |
_USER_GUIDE0 = "Step1: Please upload an image in the block above (or choose an example shown in the left)."
|
|
|
31 |
_USER_GUIDE2 = "Step2: Please choose a **Elevation angle** and click **Run Generate**. The **Elevation angle** is the elevation of the input image. This costs about 30s."
|
32 |
_USER_GUIDE3 = "Generated multiview images are shown below! (You may adjust the **Crop size** and **Elevation angle** to get a better result!)"
|
33 |
|
|
|
76 |
results = add_margin(ref_img_, size=256)
|
77 |
return results
|
78 |
|
79 |
+
# def generate(model, sample_steps, batch_view_num, sample_num, cfg_scale, seed, image_input, elevation_input):
|
80 |
+
# if deployed:
|
81 |
+
# assert isinstance(model, SyncMultiviewDiffusion)
|
82 |
+
# seed=int(seed)
|
83 |
+
# torch.random.manual_seed(seed)
|
84 |
+
# np.random.seed(seed)
|
85 |
+
|
86 |
+
# # prepare data
|
87 |
+
# image_input = np.asarray(image_input)
|
88 |
+
# image_input = image_input.astype(np.float32) / 255.0
|
89 |
+
# alpha_values = image_input[:,:, 3:]
|
90 |
+
# image_input[:, :, :3] = alpha_values * image_input[:,:, :3] + 1 - alpha_values # white background
|
91 |
+
# image_input = image_input[:, :, :3] * 2.0 - 1.0
|
92 |
+
# image_input = torch.from_numpy(image_input.astype(np.float32))
|
93 |
+
# elevation_input = torch.from_numpy(np.asarray([np.deg2rad(elevation_input)], np.float32))
|
94 |
+
# data = {"input_image": image_input, "input_elevation": elevation_input}
|
95 |
+
# for k, v in data.items():
|
96 |
+
# if deployed:
|
97 |
+
# data[k] = v.unsqueeze(0).cuda()
|
98 |
+
# else:
|
99 |
+
# data[k] = v.unsqueeze(0)
|
100 |
+
# data[k] = torch.repeat_interleave(data[k], sample_num, dim=0)
|
101 |
+
|
102 |
+
# if deployed:
|
103 |
+
# sampler = SyncDDIMSampler(model, sample_steps)
|
104 |
+
# x_sample = model.sample(sampler, data, cfg_scale, batch_view_num)
|
105 |
+
# else:
|
106 |
+
# x_sample = torch.zeros(sample_num, 16, 3, 256, 256)
|
107 |
+
|
108 |
+
# B, N, _, H, W = x_sample.shape
|
109 |
+
# x_sample = (torch.clamp(x_sample,max=1.0,min=-1.0) + 1) * 0.5
|
110 |
+
# x_sample = x_sample.permute(0,1,3,4,2).cpu().numpy() * 255
|
111 |
+
# x_sample = x_sample.astype(np.uint8)
|
112 |
+
|
113 |
+
# results = []
|
114 |
+
# for bi in range(B):
|
115 |
+
# results.append(np.concatenate([x_sample[bi,ni] for ni in range(N)], 1))
|
116 |
+
# results = np.concatenate(results, 0)
|
117 |
+
# return Image.fromarray(results)
|
118 |
+
# else:
|
119 |
+
# return Image.fromarray(np.zeros([sample_num*256,16*256,3],np.uint8))
|
120 |
+
|
121 |
+
def generate(pipe, image_input, target_index):
|
122 |
+
output = pipe(conditioning_image=image_input)
|
123 |
+
return output[target_index]
|
124 |
|
125 |
def sam_predict(predictor, removal, raw_im):
|
126 |
if raw_im is None: return None
|
|
|
152 |
else:
|
153 |
return raw_im
|
154 |
|
155 |
+
def load_model(cfg,ckpt,strict=True):
|
|
|
|
|
|
|
|
|
156 |
config = OmegaConf.load(cfg)
|
157 |
+
model = instantiate_from_config(config.model)
|
158 |
+
print(f'loading model from {ckpt} ...')
|
159 |
+
ckpt = torch.load(ckpt,map_location='cuda')
|
160 |
+
model.load_state_dict(ckpt['state_dict'],strict=strict)
|
161 |
+
model = model.cuda().eval()
|
162 |
+
return model
|
163 |
+
|
164 |
+
def run_demo():
|
165 |
+
# # device = f"cuda:0" if torch.cuda.is_available() else "cpu"
|
166 |
+
# # models = None # init_model(device, os.path.join(code_dir, ckpt))
|
167 |
+
# cfg = 'configs/syncdreamer.yaml'
|
168 |
+
# ckpt = 'ckpt/syncdreamer-pretrain.ckpt'
|
169 |
+
# config = OmegaConf.load(cfg)
|
170 |
+
# # model = None
|
171 |
+
|
172 |
if deployed:
|
173 |
+
controlnet = ControlNetModelSync.from_pretrained('controlnet_ckpt', torch_dtype=torch.float32, use_safetensors=True)
|
174 |
+
cfg = 'configs/syncdreamer.yaml'
|
175 |
+
dreamer = load_model(cfg, 'ckpt/syncdreamer-pretrain.ckpt', strict=True)
|
176 |
+
|
177 |
+
controlnet.to('cuda', dtype=torch.float32)
|
178 |
+
|
179 |
+
pipe = StableDiffusionControlNetPipeline.from_pretrained(
|
180 |
+
controlnet=controlnet, dreamer=dreamer, torch_dtype=torch.float32, use_safetensors=True
|
181 |
+
)
|
182 |
+
pipe.to('cuda', dtype=torch.float32)
|
183 |
+
|
184 |
+
# if deployed:
|
185 |
+
# model = instantiate_from_config(config.model)
|
186 |
+
# print(f'loading model from {ckpt} ...')
|
187 |
+
# ckpt = torch.load(ckpt,map_location='cpu')
|
188 |
+
# model.load_state_dict(ckpt['state_dict'], strict=True)
|
189 |
+
# model = model.cuda().eval()
|
190 |
+
# del ckpt
|
191 |
mask_predictor = sam_init()
|
192 |
removal = BackgroundRemoval()
|
193 |
else:
|
194 |
+
# model = None
|
195 |
+
# mask_predictor = None
|
196 |
+
# removal = None
|
197 |
+
controlnet = None
|
198 |
+
dreamer = None
|
199 |
+
pipe = None
|
200 |
|
201 |
# NOTE: Examples must match inputs
|
202 |
examples_full = [
|
|
|
211 |
]
|
212 |
|
213 |
image_block = gr.Image(type='pil', image_mode='RGBA', height=256, label='Input image', tool=None, interactive=True)
|
214 |
+
azimuth = gr.Slider(0, 360, 90, step=22.5, label='Target azimuth', interactive=True)
|
215 |
crop_size = gr.Slider(120, 240, 200, step=10, label='Crop size', interactive=True)
|
216 |
|
217 |
+
target_index = round(azimuth % 360 / 22.5)
|
218 |
+
|
219 |
# Compose demo layout & data flow.
|
220 |
with gr.Blocks(title=_TITLE, css="hf_demo/style.css") as demo:
|
221 |
with gr.Row():
|
|
|
229 |
with gr.Column(scale=1.2):
|
230 |
gr.Examples(
|
231 |
examples=examples_full, # NOTE: elements must match inputs list!
|
232 |
+
inputs=[image_block, azimuth, crop_size],
|
233 |
+
outputs=[image_block, azimuth, crop_size],
|
234 |
cache_examples=False,
|
235 |
label='Examples (click one of the images below to start)',
|
236 |
examples_per_page=5,
|
|
|
250 |
|
251 |
with gr.Column(scale=0.8):
|
252 |
input_block = gr.Image(type='pil', image_mode='RGBA', label="Input to SyncDreamer", height=256, interactive=False)
|
253 |
+
azimuth.render()
|
254 |
with gr.Accordion('Advanced options', open=False):
|
255 |
cfg_scale = gr.Slider(1.0, 5.0, 2.0, step=0.1, label='Classifier free guidance', interactive=True)
|
256 |
sample_num = gr.Slider(1, 2, 1, step=1, label='Sample num', interactive=False, info='How many instance (16 images per instance)')
|
|
|
279 |
# crop_btn.click(fn=resize_inputs, inputs=[sam_block, crop_size], outputs=[input_block], queue=False)\
|
280 |
# .success(fn=partial(update_guide, _USER_GUIDE2), outputs=[guide_text], queue=False)
|
281 |
|
282 |
+
run_btn.click(partial(generate, pipe), inputs=[input_block, target_index], outputs=[output_block], queue=True)\
|
283 |
.success(fn=partial(update_guide, _USER_GUIDE3), outputs=[guide_text], queue=False)
|
284 |
|
285 |
demo.queue().launch(share=False, max_threads=80) # auth=("admin", os.environ['PASSWD'])
|
controlnet_ckpt/config.json
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_class_name": "ControlNetModelSync",
|
3 |
+
"_diffusers_version": "0.25.0.dev0",
|
4 |
+
"attention_resolutions": [
|
5 |
+
4,
|
6 |
+
2,
|
7 |
+
1
|
8 |
+
],
|
9 |
+
"channel_mult": [
|
10 |
+
1,
|
11 |
+
2,
|
12 |
+
4,
|
13 |
+
4
|
14 |
+
],
|
15 |
+
"context_dim": 768,
|
16 |
+
"image_size": 32,
|
17 |
+
"in_channels": 8,
|
18 |
+
"legacy": false,
|
19 |
+
"model_channels": 320,
|
20 |
+
"num_heads": 8,
|
21 |
+
"num_res_blocks": 2,
|
22 |
+
"out_channels": 4,
|
23 |
+
"transformer_depth": 1,
|
24 |
+
"use_checkpoint": false,
|
25 |
+
"use_spatial_transformer": true,
|
26 |
+
"volume_dims": [
|
27 |
+
64,
|
28 |
+
128,
|
29 |
+
256,
|
30 |
+
512
|
31 |
+
]
|
32 |
+
}
|
controlnet_sync.py
ADDED
@@ -0,0 +1,368 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from dataclasses import dataclass
|
15 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
16 |
+
|
17 |
+
import os
|
18 |
+
from typing import Any, Callable, List, Optional, Tuple, Union
|
19 |
+
import torch
|
20 |
+
from torch import nn
|
21 |
+
from torch.nn import functional as F
|
22 |
+
|
23 |
+
from diffusers.configuration_utils import ConfigMixin, register_to_config
|
24 |
+
|
25 |
+
from diffusers.loaders import FromOriginalControlnetMixin
|
26 |
+
from diffusers.utils import BaseOutput, logging
|
27 |
+
from diffusers.models.attention_processor import (
|
28 |
+
ADDED_KV_ATTENTION_PROCESSORS,
|
29 |
+
CROSS_ATTENTION_PROCESSORS,
|
30 |
+
AttentionProcessor,
|
31 |
+
AttnAddedKVProcessor,
|
32 |
+
AttnProcessor,
|
33 |
+
)
|
34 |
+
from diffusers.models.embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps
|
35 |
+
from diffusers.models.modeling_utils import ModelMixin
|
36 |
+
from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, DownBlock2D, UNetMidBlock2D, UNetMidBlock2DCrossAttn, get_down_block
|
37 |
+
from diffusers.models.unet_2d_condition import UNet2DConditionModel
|
38 |
+
|
39 |
+
from diffusers.utils import (
|
40 |
+
CONFIG_NAME,
|
41 |
+
FLAX_WEIGHTS_NAME,
|
42 |
+
MIN_PEFT_VERSION,
|
43 |
+
SAFETENSORS_WEIGHTS_NAME,
|
44 |
+
WEIGHTS_NAME,
|
45 |
+
_add_variant,
|
46 |
+
_get_model_file,
|
47 |
+
check_peft_version,
|
48 |
+
deprecate,
|
49 |
+
is_accelerate_available,
|
50 |
+
is_torch_version,
|
51 |
+
logging,
|
52 |
+
)
|
53 |
+
from diffusers.utils.hub_utils import PushToHubMixin
|
54 |
+
|
55 |
+
from SyncDreamer.ldm.modules.attention import default, zero_module, checkpoint
|
56 |
+
from SyncDreamer.ldm.modules.diffusionmodules.openaimodel import UNetModel
|
57 |
+
from SyncDreamer.ldm.modules.diffusionmodules.util import timestep_embedding
|
58 |
+
from SyncDreamer.ldm.models.diffusion.sync_dreamer_attention import DepthWiseAttention
|
59 |
+
|
60 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
61 |
+
|
62 |
+
class DepthAttention(nn.Module):
|
63 |
+
def __init__(self, query_dim, context_dim, heads, dim_head, output_bias=True):
|
64 |
+
super().__init__()
|
65 |
+
inner_dim = dim_head * heads
|
66 |
+
context_dim = default(context_dim, query_dim)
|
67 |
+
|
68 |
+
self.scale = dim_head ** -0.5
|
69 |
+
self.heads = heads
|
70 |
+
self.dim_head = dim_head
|
71 |
+
|
72 |
+
self.to_q = nn.Conv2d(query_dim, inner_dim, 1, 1, bias=False)
|
73 |
+
self.to_k = nn.Conv3d(context_dim, inner_dim, 1, 1, bias=False)
|
74 |
+
self.to_v = nn.Conv3d(context_dim, inner_dim, 1, 1, bias=False)
|
75 |
+
if output_bias:
|
76 |
+
self.to_out = nn.Conv2d(inner_dim, query_dim, 1, 1)
|
77 |
+
else:
|
78 |
+
self.to_out = nn.Conv2d(inner_dim, query_dim, 1, 1, bias=False)
|
79 |
+
|
80 |
+
def forward(self, x, context):
|
81 |
+
"""
|
82 |
+
|
83 |
+
@param x: b,f0,h,w
|
84 |
+
@param context: b,f1,d,h,w
|
85 |
+
@return:
|
86 |
+
"""
|
87 |
+
hn, hd = self.heads, self.dim_head
|
88 |
+
b, _, h, w = x.shape
|
89 |
+
b, _, d, h, w = context.shape
|
90 |
+
|
91 |
+
q = self.to_q(x).reshape(b,hn,hd,h,w) # b,t,h,w
|
92 |
+
k = self.to_k(context).reshape(b,hn,hd,d,h,w) # b,t,d,h,w
|
93 |
+
v = self.to_v(context).reshape(b,hn,hd,d,h,w) # b,t,d,h,w
|
94 |
+
|
95 |
+
sim = torch.sum(q.unsqueeze(3) * k, 2) * self.scale # b,hn,d,h,w
|
96 |
+
attn = sim.softmax(dim=2)
|
97 |
+
|
98 |
+
# b,hn,hd,d,h,w * b,hn,1,d,h,w
|
99 |
+
out = torch.sum(v * attn.unsqueeze(2), 3) # b,hn,hd,h,w
|
100 |
+
out = out.reshape(b,hn*hd,h,w)
|
101 |
+
return self.to_out(out)
|
102 |
+
|
103 |
+
|
104 |
+
class DepthTransformer(nn.Module):
|
105 |
+
def __init__(self, dim, n_heads, d_head, context_dim=None, checkpoint=False):
|
106 |
+
super().__init__()
|
107 |
+
inner_dim = n_heads * d_head
|
108 |
+
self.proj_in = nn.Sequential(
|
109 |
+
nn.Conv2d(dim, inner_dim, 1, 1),
|
110 |
+
nn.GroupNorm(8, inner_dim),
|
111 |
+
nn.SiLU(True),
|
112 |
+
)
|
113 |
+
self.proj_context = nn.Sequential(
|
114 |
+
nn.Conv3d(context_dim, context_dim, 1, 1, bias=False), # no bias
|
115 |
+
nn.GroupNorm(8, context_dim),
|
116 |
+
nn.ReLU(True), # only relu, because we want input is 0, output is 0
|
117 |
+
)
|
118 |
+
self.depth_attn = DepthAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, context_dim=context_dim, output_bias=False) # is a self-attention if not self.disable_self_attn
|
119 |
+
self.proj_out = nn.Sequential(
|
120 |
+
nn.GroupNorm(8, inner_dim),
|
121 |
+
nn.ReLU(True),
|
122 |
+
nn.Conv2d(inner_dim, inner_dim, 3, 1, 1, bias=False),
|
123 |
+
nn.GroupNorm(8, inner_dim),
|
124 |
+
nn.ReLU(True),
|
125 |
+
zero_module(nn.Conv2d(inner_dim, dim, 3, 1, 1, bias=False)),
|
126 |
+
)
|
127 |
+
self.checkpoint = checkpoint
|
128 |
+
|
129 |
+
def forward(self, x, context=None):
|
130 |
+
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
|
131 |
+
|
132 |
+
def _forward(self, x, context):
|
133 |
+
x_in = x
|
134 |
+
x = self.proj_in(x)
|
135 |
+
context = self.proj_context(context)
|
136 |
+
x = self.depth_attn(x, context)
|
137 |
+
x = self.proj_out(x) + x_in
|
138 |
+
return x
|
139 |
+
|
140 |
+
@dataclass
|
141 |
+
class ControlNetOutputSync(BaseOutput):
|
142 |
+
"""
|
143 |
+
The output of [`ControlNetModelSync`].
|
144 |
+
|
145 |
+
Args:
|
146 |
+
down_block_res_samples (`tuple[torch.Tensor]`):
|
147 |
+
A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
|
148 |
+
be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be
|
149 |
+
used to condition the original UNet's downsampling activations.
|
150 |
+
mid_down_block_re_sample (`torch.Tensor`):
|
151 |
+
The activation of the midde block (the lowest sample resolution). Each tensor should be of shape
|
152 |
+
`(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
|
153 |
+
Output can be used to condition the original UNet's middle block activation.
|
154 |
+
"""
|
155 |
+
|
156 |
+
down_block_res_samples: Tuple[torch.Tensor]
|
157 |
+
mid_block_res_sample: torch.Tensor
|
158 |
+
|
159 |
+
|
160 |
+
class ControlNetConditioningEmbeddingSync(nn.Module):
|
161 |
+
"""
|
162 |
+
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
|
163 |
+
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
|
164 |
+
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
|
165 |
+
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
|
166 |
+
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
|
167 |
+
model) to encode image-space conditions ... into feature maps ..."
|
168 |
+
"""
|
169 |
+
|
170 |
+
def __init__(
|
171 |
+
self,
|
172 |
+
conditioning_embedding_channels: int,
|
173 |
+
conditioning_channels: int = 3,
|
174 |
+
block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
|
175 |
+
):
|
176 |
+
super().__init__()
|
177 |
+
|
178 |
+
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
|
179 |
+
|
180 |
+
self.blocks = nn.ModuleList([])
|
181 |
+
|
182 |
+
for i in range(len(block_out_channels) - 1):
|
183 |
+
channel_in = block_out_channels[i]
|
184 |
+
channel_out = block_out_channels[i + 1]
|
185 |
+
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
|
186 |
+
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
|
187 |
+
|
188 |
+
self.conv_out = zero_module(
|
189 |
+
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
|
190 |
+
)
|
191 |
+
|
192 |
+
def forward(self, conditioning):
|
193 |
+
embedding = self.conv_in(conditioning)
|
194 |
+
embedding = F.silu(embedding)
|
195 |
+
|
196 |
+
for block in self.blocks:
|
197 |
+
embedding = block(embedding)
|
198 |
+
embedding = F.silu(embedding)
|
199 |
+
|
200 |
+
embedding = self.conv_out(embedding)
|
201 |
+
|
202 |
+
return embedding
|
203 |
+
|
204 |
+
|
205 |
+
class ControlNetModelSync(UNetModel, ModelMixin, ConfigMixin):
|
206 |
+
use_fp16 = False
|
207 |
+
dtype = torch.float16 if use_fp16 else torch.float32
|
208 |
+
|
209 |
+
@register_to_config
|
210 |
+
def __init__(
|
211 |
+
self,
|
212 |
+
volume_dims=[64, 128, 256, 512],
|
213 |
+
image_size=32,
|
214 |
+
in_channels=8,
|
215 |
+
model_channels=320,
|
216 |
+
out_channels=4,
|
217 |
+
num_res_blocks=2,
|
218 |
+
attention_resolutions=[4, 2, 1],
|
219 |
+
channel_mult=[1, 2, 4, 4],
|
220 |
+
use_checkpoint=False,
|
221 |
+
legacy=False,
|
222 |
+
num_heads=8,
|
223 |
+
use_spatial_transformer=True,
|
224 |
+
transformer_depth=1,
|
225 |
+
context_dim=768,
|
226 |
+
):
|
227 |
+
|
228 |
+
super().__init__(image_size=image_size, in_channels=in_channels, model_channels=model_channels, out_channels=out_channels, num_res_blocks=num_res_blocks, attention_resolutions=attention_resolutions, channel_mult=channel_mult, use_checkpoint=use_checkpoint, legacy=legacy, num_heads=num_heads, use_spatial_transformer=use_spatial_transformer, transformer_depth=transformer_depth, context_dim=context_dim)
|
229 |
+
|
230 |
+
block_out_channels = (320, 640, 1280, 1280)
|
231 |
+
conditioning_embedding_out_channels = (16, 32, 96, 256)
|
232 |
+
conditioning_channels = 3
|
233 |
+
down_block_types = (
|
234 |
+
"CrossAttnDownBlock2D",
|
235 |
+
"CrossAttnDownBlock2D",
|
236 |
+
"CrossAttnDownBlock2D",
|
237 |
+
"DownBlock2D",
|
238 |
+
)
|
239 |
+
layers_per_block = 2
|
240 |
+
|
241 |
+
# input
|
242 |
+
conv_in_kernel = 3
|
243 |
+
conv_in_padding = (conv_in_kernel - 1) // 2
|
244 |
+
|
245 |
+
d0,d1,d2,d3 = volume_dims
|
246 |
+
|
247 |
+
# 4
|
248 |
+
ch = model_channels*channel_mult[2]
|
249 |
+
self.middle_conditions = DepthTransformer(ch, 4, d3 // 2, context_dim=d3)
|
250 |
+
|
251 |
+
self.controlnet_cond_embedding = ControlNetConditioningEmbeddingSync(
|
252 |
+
conditioning_embedding_channels=self.in_channels,
|
253 |
+
block_out_channels=conditioning_embedding_out_channels,
|
254 |
+
conditioning_channels=conditioning_channels,
|
255 |
+
)
|
256 |
+
|
257 |
+
self.controlnet_down_blocks = nn.ModuleList([])
|
258 |
+
# down
|
259 |
+
output_channel = block_out_channels[0]
|
260 |
+
|
261 |
+
controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
|
262 |
+
controlnet_block = zero_module(controlnet_block)
|
263 |
+
self.controlnet_down_blocks.append(controlnet_block)
|
264 |
+
|
265 |
+
for i, down_block_type in enumerate(down_block_types):
|
266 |
+
input_channel = output_channel
|
267 |
+
output_channel = block_out_channels[i]
|
268 |
+
is_final_block = i == len(block_out_channels) - 1
|
269 |
+
|
270 |
+
for _ in range(layers_per_block):
|
271 |
+
controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
|
272 |
+
controlnet_block = zero_module(controlnet_block)
|
273 |
+
self.controlnet_down_blocks.append(controlnet_block)
|
274 |
+
|
275 |
+
if not is_final_block:
|
276 |
+
controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
|
277 |
+
controlnet_block = zero_module(controlnet_block)
|
278 |
+
self.controlnet_down_blocks.append(controlnet_block)
|
279 |
+
|
280 |
+
# mid
|
281 |
+
mid_block_channel = block_out_channels[-1]
|
282 |
+
|
283 |
+
controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)
|
284 |
+
controlnet_block = zero_module(controlnet_block)
|
285 |
+
self.controlnet_mid_block = controlnet_block
|
286 |
+
|
287 |
+
@classmethod
|
288 |
+
def from_unet(
|
289 |
+
cls,
|
290 |
+
unet: DepthWiseAttention,
|
291 |
+
load_weights_from_unet: bool = True,
|
292 |
+
):
|
293 |
+
r"""
|
294 |
+
Instantiate a [`ControlNetModelSync`] from [`DepthWiseAttention`].
|
295 |
+
|
296 |
+
Parameters:
|
297 |
+
unet (`DepthWiseAttention`):
|
298 |
+
The UNet model weights to copy to the [`ControlNetModelSync`]. All configuration options are also copied
|
299 |
+
where applicable.
|
300 |
+
"""
|
301 |
+
|
302 |
+
controlnet = cls(
|
303 |
+
image_size=32,
|
304 |
+
in_channels=8,
|
305 |
+
model_channels=320,
|
306 |
+
out_channels=4,
|
307 |
+
num_res_blocks=2,
|
308 |
+
attention_resolutions=[ 4, 2, 1 ],
|
309 |
+
num_heads=8,
|
310 |
+
volume_dims=[64, 128, 256, 512],
|
311 |
+
channel_mult=[ 1, 2, 4, 4 ],
|
312 |
+
use_spatial_transformer=True,
|
313 |
+
transformer_depth=1,
|
314 |
+
context_dim=768,
|
315 |
+
use_checkpoint=False,
|
316 |
+
legacy=False,
|
317 |
+
)
|
318 |
+
|
319 |
+
if load_weights_from_unet:
|
320 |
+
controlnet.time_embed.load_state_dict(unet.time_embed.state_dict())
|
321 |
+
controlnet.input_blocks.load_state_dict(unet.input_blocks.state_dict())
|
322 |
+
controlnet.middle_block.load_state_dict(unet.middle_block.state_dict())
|
323 |
+
controlnet.middle_conditions.load_state_dict(unet.middle_conditions.state_dict())
|
324 |
+
|
325 |
+
return controlnet
|
326 |
+
|
327 |
+
def forward(self, x, timesteps=None, controlnet_cond=None, conditioning_scale=1.0, context=None, return_dict = True, source_dict=None, **kwargs):
|
328 |
+
|
329 |
+
# 1-4. Down and mid blocks, incluidng time embedding
|
330 |
+
if len(timesteps.shape) == 0:
|
331 |
+
timesteps = timesteps[None].to(x.device)
|
332 |
+
hs = []
|
333 |
+
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
|
334 |
+
emb = self.time_embed(t_emb)
|
335 |
+
controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
|
336 |
+
x = x + controlnet_cond
|
337 |
+
h = x.type(self.dtype)
|
338 |
+
for index, module in enumerate(self.input_blocks):
|
339 |
+
h = module(h, emb, context)
|
340 |
+
hs.append(h)
|
341 |
+
|
342 |
+
h = self.middle_block(h, emb, context)
|
343 |
+
h = self.middle_conditions(h, context=source_dict[h.shape[-1]])
|
344 |
+
|
345 |
+
# 5. Control net blocks
|
346 |
+
controlnet_down_block_res_samples = ()
|
347 |
+
|
348 |
+
assert len(hs) == len(self.controlnet_down_blocks), "Number of layers in 'hs' should be equal to 'controlnet_down_blocks'"
|
349 |
+
|
350 |
+
for down_block_res_sample, controlnet_block in zip(hs, self.controlnet_down_blocks):
|
351 |
+
down_block_res_sample = controlnet_block(down_block_res_sample)
|
352 |
+
controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
|
353 |
+
|
354 |
+
down_block_res_samples = controlnet_down_block_res_samples
|
355 |
+
|
356 |
+
mid_block_res_sample = self.controlnet_mid_block(h)
|
357 |
+
|
358 |
+
if not return_dict:
|
359 |
+
return (down_block_res_samples, mid_block_res_sample)
|
360 |
+
|
361 |
+
return ControlNetOutputSync(
|
362 |
+
down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
|
363 |
+
)
|
364 |
+
|
365 |
+
def zero_module(module):
|
366 |
+
for p in module.parameters():
|
367 |
+
nn.init.zeros_(p)
|
368 |
+
return module
|
diffusers/__init__.py
ADDED
@@ -0,0 +1,758 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
__version__ = "0.26.0.dev0"
|
2 |
+
|
3 |
+
from typing import TYPE_CHECKING
|
4 |
+
|
5 |
+
from .utils import (
|
6 |
+
DIFFUSERS_SLOW_IMPORT,
|
7 |
+
OptionalDependencyNotAvailable,
|
8 |
+
_LazyModule,
|
9 |
+
is_flax_available,
|
10 |
+
is_k_diffusion_available,
|
11 |
+
is_librosa_available,
|
12 |
+
is_note_seq_available,
|
13 |
+
is_onnx_available,
|
14 |
+
is_scipy_available,
|
15 |
+
is_torch_available,
|
16 |
+
is_torchsde_available,
|
17 |
+
is_transformers_available,
|
18 |
+
)
|
19 |
+
|
20 |
+
|
21 |
+
# Lazy Import based on
|
22 |
+
# https://github.com/huggingface/transformers/blob/main/src/transformers/__init__.py
|
23 |
+
|
24 |
+
# When adding a new object to this init, please add it to `_import_structure`. The `_import_structure` is a dictionary submodule to list of object names,
|
25 |
+
# and is used to defer the actual importing for when the objects are requested.
|
26 |
+
# This way `import diffusers` provides the names in the namespace without actually importing anything (and especially none of the backends).
|
27 |
+
|
28 |
+
_import_structure = {
|
29 |
+
"configuration_utils": ["ConfigMixin"],
|
30 |
+
"models": [],
|
31 |
+
"pipelines": [],
|
32 |
+
"schedulers": [],
|
33 |
+
"utils": [
|
34 |
+
"OptionalDependencyNotAvailable",
|
35 |
+
"is_flax_available",
|
36 |
+
"is_inflect_available",
|
37 |
+
"is_invisible_watermark_available",
|
38 |
+
"is_k_diffusion_available",
|
39 |
+
"is_k_diffusion_version",
|
40 |
+
"is_librosa_available",
|
41 |
+
"is_note_seq_available",
|
42 |
+
"is_onnx_available",
|
43 |
+
"is_scipy_available",
|
44 |
+
"is_torch_available",
|
45 |
+
"is_torchsde_available",
|
46 |
+
"is_transformers_available",
|
47 |
+
"is_transformers_version",
|
48 |
+
"is_unidecode_available",
|
49 |
+
"logging",
|
50 |
+
],
|
51 |
+
}
|
52 |
+
|
53 |
+
try:
|
54 |
+
if not is_onnx_available():
|
55 |
+
raise OptionalDependencyNotAvailable()
|
56 |
+
except OptionalDependencyNotAvailable:
|
57 |
+
from .utils import dummy_onnx_objects # noqa F403
|
58 |
+
|
59 |
+
_import_structure["utils.dummy_onnx_objects"] = [
|
60 |
+
name for name in dir(dummy_onnx_objects) if not name.startswith("_")
|
61 |
+
]
|
62 |
+
|
63 |
+
else:
|
64 |
+
_import_structure["pipelines"].extend(["OnnxRuntimeModel"])
|
65 |
+
|
66 |
+
try:
|
67 |
+
if not is_torch_available():
|
68 |
+
raise OptionalDependencyNotAvailable()
|
69 |
+
except OptionalDependencyNotAvailable:
|
70 |
+
from .utils import dummy_pt_objects # noqa F403
|
71 |
+
|
72 |
+
_import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")]
|
73 |
+
|
74 |
+
else:
|
75 |
+
_import_structure["models"].extend(
|
76 |
+
[
|
77 |
+
"AsymmetricAutoencoderKL",
|
78 |
+
"AutoencoderKL",
|
79 |
+
"AutoencoderKLTemporalDecoder",
|
80 |
+
"AutoencoderTiny",
|
81 |
+
"ConsistencyDecoderVAE",
|
82 |
+
"ControlNetModel",
|
83 |
+
"Kandinsky3UNet",
|
84 |
+
"ModelMixin",
|
85 |
+
"MotionAdapter",
|
86 |
+
"MultiAdapter",
|
87 |
+
"PriorTransformer",
|
88 |
+
"T2IAdapter",
|
89 |
+
"T5FilmDecoder",
|
90 |
+
"Transformer2DModel",
|
91 |
+
"UNet1DModel",
|
92 |
+
"UNet2DConditionModel",
|
93 |
+
"UNet2DModel",
|
94 |
+
"UNet3DConditionModel",
|
95 |
+
"UNetMotionModel",
|
96 |
+
"UNetSpatioTemporalConditionModel",
|
97 |
+
"UVit2DModel",
|
98 |
+
"VQModel",
|
99 |
+
]
|
100 |
+
)
|
101 |
+
|
102 |
+
_import_structure["optimization"] = [
|
103 |
+
"get_constant_schedule",
|
104 |
+
"get_constant_schedule_with_warmup",
|
105 |
+
"get_cosine_schedule_with_warmup",
|
106 |
+
"get_cosine_with_hard_restarts_schedule_with_warmup",
|
107 |
+
"get_linear_schedule_with_warmup",
|
108 |
+
"get_polynomial_decay_schedule_with_warmup",
|
109 |
+
"get_scheduler",
|
110 |
+
]
|
111 |
+
_import_structure["pipelines"].extend(
|
112 |
+
[
|
113 |
+
"AudioPipelineOutput",
|
114 |
+
"AutoPipelineForImage2Image",
|
115 |
+
"AutoPipelineForInpainting",
|
116 |
+
"AutoPipelineForText2Image",
|
117 |
+
"ConsistencyModelPipeline",
|
118 |
+
"DanceDiffusionPipeline",
|
119 |
+
"DDIMPipeline",
|
120 |
+
"DDPMPipeline",
|
121 |
+
"DiffusionPipeline",
|
122 |
+
"DiTPipeline",
|
123 |
+
"ImagePipelineOutput",
|
124 |
+
"KarrasVePipeline",
|
125 |
+
"LDMPipeline",
|
126 |
+
"LDMSuperResolutionPipeline",
|
127 |
+
"PNDMPipeline",
|
128 |
+
"RePaintPipeline",
|
129 |
+
"ScoreSdeVePipeline",
|
130 |
+
]
|
131 |
+
)
|
132 |
+
_import_structure["schedulers"].extend(
|
133 |
+
[
|
134 |
+
"AmusedScheduler",
|
135 |
+
"CMStochasticIterativeScheduler",
|
136 |
+
"DDIMInverseScheduler",
|
137 |
+
"DDIMParallelScheduler",
|
138 |
+
"DDIMScheduler",
|
139 |
+
"DDPMParallelScheduler",
|
140 |
+
"DDPMScheduler",
|
141 |
+
"DDPMWuerstchenScheduler",
|
142 |
+
"DEISMultistepScheduler",
|
143 |
+
"DPMSolverMultistepInverseScheduler",
|
144 |
+
"DPMSolverMultistepScheduler",
|
145 |
+
"DPMSolverSinglestepScheduler",
|
146 |
+
"EulerAncestralDiscreteScheduler",
|
147 |
+
"EulerDiscreteScheduler",
|
148 |
+
"HeunDiscreteScheduler",
|
149 |
+
"IPNDMScheduler",
|
150 |
+
"KarrasVeScheduler",
|
151 |
+
"KDPM2AncestralDiscreteScheduler",
|
152 |
+
"KDPM2DiscreteScheduler",
|
153 |
+
"LCMScheduler",
|
154 |
+
"PNDMScheduler",
|
155 |
+
"RePaintScheduler",
|
156 |
+
"SchedulerMixin",
|
157 |
+
"ScoreSdeVeScheduler",
|
158 |
+
"UnCLIPScheduler",
|
159 |
+
"UniPCMultistepScheduler",
|
160 |
+
"VQDiffusionScheduler",
|
161 |
+
]
|
162 |
+
)
|
163 |
+
_import_structure["training_utils"] = ["EMAModel"]
|
164 |
+
|
165 |
+
try:
|
166 |
+
if not (is_torch_available() and is_scipy_available()):
|
167 |
+
raise OptionalDependencyNotAvailable()
|
168 |
+
except OptionalDependencyNotAvailable:
|
169 |
+
from .utils import dummy_torch_and_scipy_objects # noqa F403
|
170 |
+
|
171 |
+
_import_structure["utils.dummy_torch_and_scipy_objects"] = [
|
172 |
+
name for name in dir(dummy_torch_and_scipy_objects) if not name.startswith("_")
|
173 |
+
]
|
174 |
+
|
175 |
+
else:
|
176 |
+
_import_structure["schedulers"].extend(["LMSDiscreteScheduler"])
|
177 |
+
|
178 |
+
try:
|
179 |
+
if not (is_torch_available() and is_torchsde_available()):
|
180 |
+
raise OptionalDependencyNotAvailable()
|
181 |
+
except OptionalDependencyNotAvailable:
|
182 |
+
from .utils import dummy_torch_and_torchsde_objects # noqa F403
|
183 |
+
|
184 |
+
_import_structure["utils.dummy_torch_and_torchsde_objects"] = [
|
185 |
+
name for name in dir(dummy_torch_and_torchsde_objects) if not name.startswith("_")
|
186 |
+
]
|
187 |
+
|
188 |
+
else:
|
189 |
+
_import_structure["schedulers"].extend(["DPMSolverSDEScheduler"])
|
190 |
+
|
191 |
+
try:
|
192 |
+
if not (is_torch_available() and is_transformers_available()):
|
193 |
+
raise OptionalDependencyNotAvailable()
|
194 |
+
except OptionalDependencyNotAvailable:
|
195 |
+
from .utils import dummy_torch_and_transformers_objects # noqa F403
|
196 |
+
|
197 |
+
_import_structure["utils.dummy_torch_and_transformers_objects"] = [
|
198 |
+
name for name in dir(dummy_torch_and_transformers_objects) if not name.startswith("_")
|
199 |
+
]
|
200 |
+
|
201 |
+
else:
|
202 |
+
_import_structure["pipelines"].extend(
|
203 |
+
[
|
204 |
+
"AltDiffusionImg2ImgPipeline",
|
205 |
+
"AltDiffusionPipeline",
|
206 |
+
"AmusedImg2ImgPipeline",
|
207 |
+
"AmusedInpaintPipeline",
|
208 |
+
"AmusedPipeline",
|
209 |
+
"AnimateDiffPipeline",
|
210 |
+
"AudioLDM2Pipeline",
|
211 |
+
"AudioLDM2ProjectionModel",
|
212 |
+
"AudioLDM2UNet2DConditionModel",
|
213 |
+
"AudioLDMPipeline",
|
214 |
+
"BlipDiffusionControlNetPipeline",
|
215 |
+
"BlipDiffusionPipeline",
|
216 |
+
"CLIPImageProjection",
|
217 |
+
"CycleDiffusionPipeline",
|
218 |
+
"IFImg2ImgPipeline",
|
219 |
+
"IFImg2ImgSuperResolutionPipeline",
|
220 |
+
"IFInpaintingPipeline",
|
221 |
+
"IFInpaintingSuperResolutionPipeline",
|
222 |
+
"IFPipeline",
|
223 |
+
"IFSuperResolutionPipeline",
|
224 |
+
"ImageTextPipelineOutput",
|
225 |
+
"Kandinsky3Img2ImgPipeline",
|
226 |
+
"Kandinsky3Pipeline",
|
227 |
+
"KandinskyCombinedPipeline",
|
228 |
+
"KandinskyImg2ImgCombinedPipeline",
|
229 |
+
"KandinskyImg2ImgPipeline",
|
230 |
+
"KandinskyInpaintCombinedPipeline",
|
231 |
+
"KandinskyInpaintPipeline",
|
232 |
+
"KandinskyPipeline",
|
233 |
+
"KandinskyPriorPipeline",
|
234 |
+
"KandinskyV22CombinedPipeline",
|
235 |
+
"KandinskyV22ControlnetImg2ImgPipeline",
|
236 |
+
"KandinskyV22ControlnetPipeline",
|
237 |
+
"KandinskyV22Img2ImgCombinedPipeline",
|
238 |
+
"KandinskyV22Img2ImgPipeline",
|
239 |
+
"KandinskyV22InpaintCombinedPipeline",
|
240 |
+
"KandinskyV22InpaintPipeline",
|
241 |
+
"KandinskyV22Pipeline",
|
242 |
+
"KandinskyV22PriorEmb2EmbPipeline",
|
243 |
+
"KandinskyV22PriorPipeline",
|
244 |
+
"LatentConsistencyModelImg2ImgPipeline",
|
245 |
+
"LatentConsistencyModelPipeline",
|
246 |
+
"LDMTextToImagePipeline",
|
247 |
+
"MusicLDMPipeline",
|
248 |
+
"PaintByExamplePipeline",
|
249 |
+
"PixArtAlphaPipeline",
|
250 |
+
"SemanticStableDiffusionPipeline",
|
251 |
+
"ShapEImg2ImgPipeline",
|
252 |
+
"ShapEPipeline",
|
253 |
+
"StableDiffusionAdapterPipeline",
|
254 |
+
"StableDiffusionAttendAndExcitePipeline",
|
255 |
+
"StableDiffusionControlNetImg2ImgPipeline",
|
256 |
+
"StableDiffusionControlNetInpaintPipeline",
|
257 |
+
"StableDiffusionControlNetPipeline",
|
258 |
+
"StableDiffusionDepth2ImgPipeline",
|
259 |
+
"StableDiffusionDiffEditPipeline",
|
260 |
+
"StableDiffusionGLIGENPipeline",
|
261 |
+
"StableDiffusionGLIGENTextImagePipeline",
|
262 |
+
"StableDiffusionImageVariationPipeline",
|
263 |
+
"StableDiffusionImg2ImgPipeline",
|
264 |
+
"StableDiffusionInpaintPipeline",
|
265 |
+
"StableDiffusionInpaintPipelineLegacy",
|
266 |
+
"StableDiffusionInstructPix2PixPipeline",
|
267 |
+
"StableDiffusionLatentUpscalePipeline",
|
268 |
+
"StableDiffusionLDM3DPipeline",
|
269 |
+
"StableDiffusionModelEditingPipeline",
|
270 |
+
"StableDiffusionPanoramaPipeline",
|
271 |
+
"StableDiffusionParadigmsPipeline",
|
272 |
+
"StableDiffusionPipeline",
|
273 |
+
"StableDiffusionPipelineSafe",
|
274 |
+
"StableDiffusionPix2PixZeroPipeline",
|
275 |
+
"StableDiffusionSAGPipeline",
|
276 |
+
"StableDiffusionUpscalePipeline",
|
277 |
+
"StableDiffusionXLAdapterPipeline",
|
278 |
+
"StableDiffusionXLControlNetImg2ImgPipeline",
|
279 |
+
"StableDiffusionXLControlNetInpaintPipeline",
|
280 |
+
"StableDiffusionXLControlNetPipeline",
|
281 |
+
"StableDiffusionXLImg2ImgPipeline",
|
282 |
+
"StableDiffusionXLInpaintPipeline",
|
283 |
+
"StableDiffusionXLInstructPix2PixPipeline",
|
284 |
+
"StableDiffusionXLPipeline",
|
285 |
+
"StableUnCLIPImg2ImgPipeline",
|
286 |
+
"StableUnCLIPPipeline",
|
287 |
+
"StableVideoDiffusionPipeline",
|
288 |
+
"TextToVideoSDPipeline",
|
289 |
+
"TextToVideoZeroPipeline",
|
290 |
+
"TextToVideoZeroSDXLPipeline",
|
291 |
+
"UnCLIPImageVariationPipeline",
|
292 |
+
"UnCLIPPipeline",
|
293 |
+
"UniDiffuserModel",
|
294 |
+
"UniDiffuserPipeline",
|
295 |
+
"UniDiffuserTextDecoder",
|
296 |
+
"VersatileDiffusionDualGuidedPipeline",
|
297 |
+
"VersatileDiffusionImageVariationPipeline",
|
298 |
+
"VersatileDiffusionPipeline",
|
299 |
+
"VersatileDiffusionTextToImagePipeline",
|
300 |
+
"VideoToVideoSDPipeline",
|
301 |
+
"VQDiffusionPipeline",
|
302 |
+
"WuerstchenCombinedPipeline",
|
303 |
+
"WuerstchenDecoderPipeline",
|
304 |
+
"WuerstchenPriorPipeline",
|
305 |
+
]
|
306 |
+
)
|
307 |
+
|
308 |
+
try:
|
309 |
+
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
|
310 |
+
raise OptionalDependencyNotAvailable()
|
311 |
+
except OptionalDependencyNotAvailable:
|
312 |
+
from .utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403
|
313 |
+
|
314 |
+
_import_structure["utils.dummy_torch_and_transformers_and_k_diffusion_objects"] = [
|
315 |
+
name for name in dir(dummy_torch_and_transformers_and_k_diffusion_objects) if not name.startswith("_")
|
316 |
+
]
|
317 |
+
|
318 |
+
else:
|
319 |
+
_import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline"])
|
320 |
+
|
321 |
+
try:
|
322 |
+
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
|
323 |
+
raise OptionalDependencyNotAvailable()
|
324 |
+
except OptionalDependencyNotAvailable:
|
325 |
+
from .utils import dummy_torch_and_transformers_and_onnx_objects # noqa F403
|
326 |
+
|
327 |
+
_import_structure["utils.dummy_torch_and_transformers_and_onnx_objects"] = [
|
328 |
+
name for name in dir(dummy_torch_and_transformers_and_onnx_objects) if not name.startswith("_")
|
329 |
+
]
|
330 |
+
|
331 |
+
else:
|
332 |
+
_import_structure["pipelines"].extend(
|
333 |
+
[
|
334 |
+
"OnnxStableDiffusionImg2ImgPipeline",
|
335 |
+
"OnnxStableDiffusionInpaintPipeline",
|
336 |
+
"OnnxStableDiffusionInpaintPipelineLegacy",
|
337 |
+
"OnnxStableDiffusionPipeline",
|
338 |
+
"OnnxStableDiffusionUpscalePipeline",
|
339 |
+
"StableDiffusionOnnxPipeline",
|
340 |
+
]
|
341 |
+
)
|
342 |
+
|
343 |
+
try:
|
344 |
+
if not (is_torch_available() and is_librosa_available()):
|
345 |
+
raise OptionalDependencyNotAvailable()
|
346 |
+
except OptionalDependencyNotAvailable:
|
347 |
+
from .utils import dummy_torch_and_librosa_objects # noqa F403
|
348 |
+
|
349 |
+
_import_structure["utils.dummy_torch_and_librosa_objects"] = [
|
350 |
+
name for name in dir(dummy_torch_and_librosa_objects) if not name.startswith("_")
|
351 |
+
]
|
352 |
+
|
353 |
+
else:
|
354 |
+
_import_structure["pipelines"].extend(["AudioDiffusionPipeline", "Mel"])
|
355 |
+
|
356 |
+
try:
|
357 |
+
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
|
358 |
+
raise OptionalDependencyNotAvailable()
|
359 |
+
except OptionalDependencyNotAvailable:
|
360 |
+
from .utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403
|
361 |
+
|
362 |
+
_import_structure["utils.dummy_transformers_and_torch_and_note_seq_objects"] = [
|
363 |
+
name for name in dir(dummy_transformers_and_torch_and_note_seq_objects) if not name.startswith("_")
|
364 |
+
]
|
365 |
+
|
366 |
+
|
367 |
+
else:
|
368 |
+
_import_structure["pipelines"].extend(["SpectrogramDiffusionPipeline"])
|
369 |
+
|
370 |
+
try:
|
371 |
+
if not is_flax_available():
|
372 |
+
raise OptionalDependencyNotAvailable()
|
373 |
+
except OptionalDependencyNotAvailable:
|
374 |
+
from .utils import dummy_flax_objects # noqa F403
|
375 |
+
|
376 |
+
_import_structure["utils.dummy_flax_objects"] = [
|
377 |
+
name for name in dir(dummy_flax_objects) if not name.startswith("_")
|
378 |
+
]
|
379 |
+
|
380 |
+
|
381 |
+
else:
|
382 |
+
_import_structure["models.controlnet_flax"] = ["FlaxControlNetModel"]
|
383 |
+
_import_structure["models.modeling_flax_utils"] = ["FlaxModelMixin"]
|
384 |
+
_import_structure["models.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"]
|
385 |
+
_import_structure["models.vae_flax"] = ["FlaxAutoencoderKL"]
|
386 |
+
_import_structure["pipelines"].extend(["FlaxDiffusionPipeline"])
|
387 |
+
_import_structure["schedulers"].extend(
|
388 |
+
[
|
389 |
+
"FlaxDDIMScheduler",
|
390 |
+
"FlaxDDPMScheduler",
|
391 |
+
"FlaxDPMSolverMultistepScheduler",
|
392 |
+
"FlaxEulerDiscreteScheduler",
|
393 |
+
"FlaxKarrasVeScheduler",
|
394 |
+
"FlaxLMSDiscreteScheduler",
|
395 |
+
"FlaxPNDMScheduler",
|
396 |
+
"FlaxSchedulerMixin",
|
397 |
+
"FlaxScoreSdeVeScheduler",
|
398 |
+
]
|
399 |
+
)
|
400 |
+
|
401 |
+
|
402 |
+
try:
|
403 |
+
if not (is_flax_available() and is_transformers_available()):
|
404 |
+
raise OptionalDependencyNotAvailable()
|
405 |
+
except OptionalDependencyNotAvailable:
|
406 |
+
from .utils import dummy_flax_and_transformers_objects # noqa F403
|
407 |
+
|
408 |
+
_import_structure["utils.dummy_flax_and_transformers_objects"] = [
|
409 |
+
name for name in dir(dummy_flax_and_transformers_objects) if not name.startswith("_")
|
410 |
+
]
|
411 |
+
|
412 |
+
|
413 |
+
else:
|
414 |
+
_import_structure["pipelines"].extend(
|
415 |
+
[
|
416 |
+
"FlaxStableDiffusionControlNetPipeline",
|
417 |
+
"FlaxStableDiffusionImg2ImgPipeline",
|
418 |
+
"FlaxStableDiffusionInpaintPipeline",
|
419 |
+
"FlaxStableDiffusionPipeline",
|
420 |
+
"FlaxStableDiffusionXLPipeline",
|
421 |
+
]
|
422 |
+
)
|
423 |
+
|
424 |
+
try:
|
425 |
+
if not (is_note_seq_available()):
|
426 |
+
raise OptionalDependencyNotAvailable()
|
427 |
+
except OptionalDependencyNotAvailable:
+    from .utils import dummy_note_seq_objects  # noqa F403
+
+    _import_structure["utils.dummy_note_seq_objects"] = [
+        name for name in dir(dummy_note_seq_objects) if not name.startswith("_")
+    ]
+
+
+else:
+    _import_structure["pipelines"].extend(["MidiProcessor"])
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    from .configuration_utils import ConfigMixin
+
+    try:
+        if not is_onnx_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_onnx_objects import *  # noqa F403
+    else:
+        from .pipelines import OnnxRuntimeModel
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_pt_objects import *  # noqa F403
+    else:
+        from .models import (
+            AsymmetricAutoencoderKL,
+            AutoencoderKL,
+            AutoencoderKLTemporalDecoder,
+            AutoencoderTiny,
+            ConsistencyDecoderVAE,
+            ControlNetModel,
+            Kandinsky3UNet,
+            ModelMixin,
+            MotionAdapter,
+            MultiAdapter,
+            PriorTransformer,
+            T2IAdapter,
+            T5FilmDecoder,
+            Transformer2DModel,
+            UNet1DModel,
+            UNet2DConditionModel,
+            UNet2DModel,
+            UNet3DConditionModel,
+            UNetMotionModel,
+            UNetSpatioTemporalConditionModel,
+            UVit2DModel,
+            VQModel,
+        )
+        from .optimization import (
+            get_constant_schedule,
+            get_constant_schedule_with_warmup,
+            get_cosine_schedule_with_warmup,
+            get_cosine_with_hard_restarts_schedule_with_warmup,
+            get_linear_schedule_with_warmup,
+            get_polynomial_decay_schedule_with_warmup,
+            get_scheduler,
+        )
+        from .pipelines import (
+            AudioPipelineOutput,
+            AutoPipelineForImage2Image,
+            AutoPipelineForInpainting,
+            AutoPipelineForText2Image,
+            BlipDiffusionControlNetPipeline,
+            BlipDiffusionPipeline,
+            CLIPImageProjection,
+            ConsistencyModelPipeline,
+            DanceDiffusionPipeline,
+            DDIMPipeline,
+            DDPMPipeline,
+            DiffusionPipeline,
+            DiTPipeline,
+            ImagePipelineOutput,
+            KarrasVePipeline,
+            LDMPipeline,
+            LDMSuperResolutionPipeline,
+            PNDMPipeline,
+            RePaintPipeline,
+            ScoreSdeVePipeline,
+        )
+        from .schedulers import (
+            AmusedScheduler,
+            CMStochasticIterativeScheduler,
+            DDIMInverseScheduler,
+            DDIMParallelScheduler,
+            DDIMScheduler,
+            DDPMParallelScheduler,
+            DDPMScheduler,
+            DDPMWuerstchenScheduler,
+            DEISMultistepScheduler,
+            DPMSolverMultistepInverseScheduler,
+            DPMSolverMultistepScheduler,
+            DPMSolverSinglestepScheduler,
+            EulerAncestralDiscreteScheduler,
+            EulerDiscreteScheduler,
+            HeunDiscreteScheduler,
+            IPNDMScheduler,
+            KarrasVeScheduler,
+            KDPM2AncestralDiscreteScheduler,
+            KDPM2DiscreteScheduler,
+            LCMScheduler,
+            PNDMScheduler,
+            RePaintScheduler,
+            SchedulerMixin,
+            ScoreSdeVeScheduler,
+            UnCLIPScheduler,
+            UniPCMultistepScheduler,
+            VQDiffusionScheduler,
+        )
+        from .training_utils import EMAModel
+
+    try:
+        if not (is_torch_available() and is_scipy_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_torch_and_scipy_objects import *  # noqa F403
+    else:
+        from .schedulers import LMSDiscreteScheduler
+
+    try:
+        if not (is_torch_available() and is_torchsde_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_torch_and_torchsde_objects import *  # noqa F403
+    else:
+        from .schedulers import DPMSolverSDEScheduler
+
+    try:
+        if not (is_torch_available() and is_transformers_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_torch_and_transformers_objects import *  # noqa F403
+    else:
+        from .pipelines import (
+            AltDiffusionImg2ImgPipeline,
+            AltDiffusionPipeline,
+            AmusedImg2ImgPipeline,
+            AmusedInpaintPipeline,
+            AmusedPipeline,
+            AnimateDiffPipeline,
+            AudioLDM2Pipeline,
+            AudioLDM2ProjectionModel,
+            AudioLDM2UNet2DConditionModel,
+            AudioLDMPipeline,
+            CLIPImageProjection,
+            CycleDiffusionPipeline,
+            IFImg2ImgPipeline,
+            IFImg2ImgSuperResolutionPipeline,
+            IFInpaintingPipeline,
+            IFInpaintingSuperResolutionPipeline,
+            IFPipeline,
+            IFSuperResolutionPipeline,
+            ImageTextPipelineOutput,
+            Kandinsky3Img2ImgPipeline,
+            Kandinsky3Pipeline,
+            KandinskyCombinedPipeline,
+            KandinskyImg2ImgCombinedPipeline,
+            KandinskyImg2ImgPipeline,
+            KandinskyInpaintCombinedPipeline,
+            KandinskyInpaintPipeline,
+            KandinskyPipeline,
+            KandinskyPriorPipeline,
+            KandinskyV22CombinedPipeline,
+            KandinskyV22ControlnetImg2ImgPipeline,
+            KandinskyV22ControlnetPipeline,
+            KandinskyV22Img2ImgCombinedPipeline,
+            KandinskyV22Img2ImgPipeline,
+            KandinskyV22InpaintCombinedPipeline,
+            KandinskyV22InpaintPipeline,
+            KandinskyV22Pipeline,
+            KandinskyV22PriorEmb2EmbPipeline,
+            KandinskyV22PriorPipeline,
+            LatentConsistencyModelImg2ImgPipeline,
+            LatentConsistencyModelPipeline,
+            LDMTextToImagePipeline,
+            MusicLDMPipeline,
+            PaintByExamplePipeline,
+            PixArtAlphaPipeline,
+            SemanticStableDiffusionPipeline,
+            ShapEImg2ImgPipeline,
+            ShapEPipeline,
+            StableDiffusionAdapterPipeline,
+            StableDiffusionAttendAndExcitePipeline,
+            StableDiffusionControlNetImg2ImgPipeline,
+            StableDiffusionControlNetInpaintPipeline,
+            StableDiffusionControlNetPipeline,
+            StableDiffusionDepth2ImgPipeline,
+            StableDiffusionDiffEditPipeline,
+            StableDiffusionGLIGENPipeline,
+            StableDiffusionGLIGENTextImagePipeline,
+            StableDiffusionImageVariationPipeline,
+            StableDiffusionImg2ImgPipeline,
+            StableDiffusionInpaintPipeline,
+            StableDiffusionInpaintPipelineLegacy,
+            StableDiffusionInstructPix2PixPipeline,
+            StableDiffusionLatentUpscalePipeline,
+            StableDiffusionLDM3DPipeline,
+            StableDiffusionModelEditingPipeline,
+            StableDiffusionPanoramaPipeline,
+            StableDiffusionParadigmsPipeline,
+            StableDiffusionPipeline,
+            StableDiffusionPipelineSafe,
+            StableDiffusionPix2PixZeroPipeline,
+            StableDiffusionSAGPipeline,
+            StableDiffusionUpscalePipeline,
+            StableDiffusionXLAdapterPipeline,
+            StableDiffusionXLControlNetImg2ImgPipeline,
+            StableDiffusionXLControlNetInpaintPipeline,
+            StableDiffusionXLControlNetPipeline,
+            StableDiffusionXLImg2ImgPipeline,
+            StableDiffusionXLInpaintPipeline,
+            StableDiffusionXLInstructPix2PixPipeline,
+            StableDiffusionXLPipeline,
+            StableUnCLIPImg2ImgPipeline,
+            StableUnCLIPPipeline,
+            StableVideoDiffusionPipeline,
+            TextToVideoSDPipeline,
+            TextToVideoZeroPipeline,
+            TextToVideoZeroSDXLPipeline,
+            UnCLIPImageVariationPipeline,
+            UnCLIPPipeline,
+            UniDiffuserModel,
+            UniDiffuserPipeline,
+            UniDiffuserTextDecoder,
+            VersatileDiffusionDualGuidedPipeline,
+            VersatileDiffusionImageVariationPipeline,
+            VersatileDiffusionPipeline,
+            VersatileDiffusionTextToImagePipeline,
+            VideoToVideoSDPipeline,
+            VQDiffusionPipeline,
+            WuerstchenCombinedPipeline,
+            WuerstchenDecoderPipeline,
+            WuerstchenPriorPipeline,
+        )
+
+    try:
+        if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
+    else:
+        from .pipelines import StableDiffusionKDiffusionPipeline
+
+    try:
+        if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_torch_and_transformers_and_onnx_objects import *  # noqa F403
+    else:
+        from .pipelines import (
+            OnnxStableDiffusionImg2ImgPipeline,
+            OnnxStableDiffusionInpaintPipeline,
+            OnnxStableDiffusionInpaintPipelineLegacy,
+            OnnxStableDiffusionPipeline,
+            OnnxStableDiffusionUpscalePipeline,
+            StableDiffusionOnnxPipeline,
+        )
+
+    try:
+        if not (is_torch_available() and is_librosa_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_torch_and_librosa_objects import *  # noqa F403
+    else:
+        from .pipelines import AudioDiffusionPipeline, Mel
+
+    try:
+        if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
+    else:
+        from .pipelines import SpectrogramDiffusionPipeline
+
+    try:
+        if not is_flax_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_flax_objects import *  # noqa F403
+    else:
+        from .models.controlnet_flax import FlaxControlNetModel
+        from .models.modeling_flax_utils import FlaxModelMixin
+        from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
+        from .models.vae_flax import FlaxAutoencoderKL
+        from .pipelines import FlaxDiffusionPipeline
+        from .schedulers import (
+            FlaxDDIMScheduler,
+            FlaxDDPMScheduler,
+            FlaxDPMSolverMultistepScheduler,
+            FlaxEulerDiscreteScheduler,
+            FlaxKarrasVeScheduler,
+            FlaxLMSDiscreteScheduler,
+            FlaxPNDMScheduler,
+            FlaxSchedulerMixin,
+            FlaxScoreSdeVeScheduler,
+        )
+
+    try:
+        if not (is_flax_available() and is_transformers_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_flax_and_transformers_objects import *  # noqa F403
+    else:
+        from .pipelines import (
+            FlaxStableDiffusionControlNetPipeline,
+            FlaxStableDiffusionImg2ImgPipeline,
+            FlaxStableDiffusionInpaintPipeline,
+            FlaxStableDiffusionPipeline,
+            FlaxStableDiffusionXLPipeline,
+        )
+
+    try:
+        if not (is_note_seq_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from .utils.dummy_note_seq_objects import *  # noqa F403
+    else:
+        from .pipelines import MidiProcessor
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+        extra_objects={"__version__": __version__},
+    )
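For context, a minimal sketch (not part of the diff) of what the `_LazyModule` registration above means in practice, assuming the vendored `diffusers/` directory in this Space is importable:

import diffusers

# No pipeline or scheduler modules are imported yet; _LazyModule resolves
# attributes on demand, so this first access triggers the real import:
scheduler_cls = diffusers.DDIMScheduler

# __version__ is exposed eagerly via `extra_objects` in the _LazyModule call above.
print(diffusers.__version__)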
diffusers/commands/__init__.py
ADDED
@@ -0,0 +1,27 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from argparse import ArgumentParser
+
+
+class BaseDiffusersCLICommand(ABC):
+    @staticmethod
+    @abstractmethod
+    def register_subcommand(parser: ArgumentParser):
+        raise NotImplementedError()
+
+    @abstractmethod
+    def run(self):
+        raise NotImplementedError()
diffusers/commands/diffusers_cli.py
ADDED
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from argparse import ArgumentParser
+
+from .env import EnvironmentCommand
+from .fp16_safetensors import FP16SafetensorsCommand
+
+
+def main():
+    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
+    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
+
+    # Register commands
+    EnvironmentCommand.register_subcommand(commands_parser)
+    FP16SafetensorsCommand.register_subcommand(commands_parser)
+
+    # Let's go
+    args = parser.parse_args()
+
+    if not hasattr(args, "func"):
+        parser.print_help()
+        exit(1)
+
+    # Run
+    service = args.func(args)
+    service.run()
+
+
+if __name__ == "__main__":
+    main()
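As a quick hedged illustration (not part of the diff), the `main()` entry point above is what the `diffusers-cli` console script dispatches to; the registered subcommands can also be driven programmatically, assuming the vendored package is on the import path:

import sys
from diffusers.commands.diffusers_cli import main

# Equivalent to running `diffusers-cli env` in a shell: argparse selects the
# `env` subcommand registered by EnvironmentCommand in the next file.
sys.argv = ["diffusers-cli", "env"]
main()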
diffusers/commands/env.py
ADDED
@@ -0,0 +1,84 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import platform
+from argparse import ArgumentParser
+
+import huggingface_hub
+
+from .. import __version__ as version
+from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
+from . import BaseDiffusersCLICommand
+
+
+def info_command_factory(_):
+    return EnvironmentCommand()
+
+
+class EnvironmentCommand(BaseDiffusersCLICommand):
+    @staticmethod
+    def register_subcommand(parser: ArgumentParser):
+        download_parser = parser.add_parser("env")
+        download_parser.set_defaults(func=info_command_factory)
+
+    def run(self):
+        hub_version = huggingface_hub.__version__
+
+        pt_version = "not installed"
+        pt_cuda_available = "NA"
+        if is_torch_available():
+            import torch
+
+            pt_version = torch.__version__
+            pt_cuda_available = torch.cuda.is_available()
+
+        transformers_version = "not installed"
+        if is_transformers_available():
+            import transformers
+
+            transformers_version = transformers.__version__
+
+        accelerate_version = "not installed"
+        if is_accelerate_available():
+            import accelerate
+
+            accelerate_version = accelerate.__version__
+
+        xformers_version = "not installed"
+        if is_xformers_available():
+            import xformers
+
+            xformers_version = xformers.__version__
+
+        info = {
+            "`diffusers` version": version,
+            "Platform": platform.platform(),
+            "Python version": platform.python_version(),
+            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
+            "Huggingface_hub version": hub_version,
+            "Transformers version": transformers_version,
+            "Accelerate version": accelerate_version,
+            "xFormers version": xformers_version,
+            "Using GPU in script?": "<fill in>",
+            "Using distributed or parallel set-up in script?": "<fill in>",
+        }
+
+        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
+        print(self.format_dict(info))
+
+        return info
+
+    @staticmethod
+    def format_dict(d):
+        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
diffusers/commands/fp16_safetensors.py
ADDED
@@ -0,0 +1,132 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Usage example:
+    diffusers-cli fp16_safetensors --ckpt_id=openai/shap-e --fp16 --use_safetensors
+"""
+
+import glob
+import json
+import warnings
+from argparse import ArgumentParser, Namespace
+from importlib import import_module
+
+import huggingface_hub
+import torch
+from huggingface_hub import hf_hub_download
+from packaging import version
+
+from ..utils import logging
+from . import BaseDiffusersCLICommand
+
+
+def conversion_command_factory(args: Namespace):
+    if args.use_auth_token:
+        warnings.warn(
+            "The `--use_auth_token` flag is deprecated and will be removed in a future version. Authentication is now"
+            " handled automatically if user is logged in."
+        )
+    return FP16SafetensorsCommand(args.ckpt_id, args.fp16, args.use_safetensors)
+
+
+class FP16SafetensorsCommand(BaseDiffusersCLICommand):
+    @staticmethod
+    def register_subcommand(parser: ArgumentParser):
+        conversion_parser = parser.add_parser("fp16_safetensors")
+        conversion_parser.add_argument(
+            "--ckpt_id",
+            type=str,
+            help="Repo id of the checkpoints on which to run the conversion. Example: 'openai/shap-e'.",
+        )
+        conversion_parser.add_argument(
+            "--fp16", action="store_true", help="If serializing the variables in FP16 precision."
+        )
+        conversion_parser.add_argument(
+            "--use_safetensors", action="store_true", help="If serializing in the safetensors format."
+        )
+        conversion_parser.add_argument(
+            "--use_auth_token",
+            action="store_true",
+            help="When working with checkpoints having private visibility. When used `huggingface-cli login` needs to be run beforehand.",
+        )
+        conversion_parser.set_defaults(func=conversion_command_factory)
+
+    def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool):
+        self.logger = logging.get_logger("diffusers-cli/fp16_safetensors")
+        self.ckpt_id = ckpt_id
+        self.local_ckpt_dir = f"/tmp/{ckpt_id}"
+        self.fp16 = fp16
+
+        self.use_safetensors = use_safetensors
+
+        if not self.use_safetensors and not self.fp16:
+            raise NotImplementedError(
+                "When `use_safetensors` and `fp16` both are False, then this command is of no use."
+            )
+
+    def run(self):
+        if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"):
+            raise ImportError(
+                "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub"
+                " installation."
+            )
+        else:
+            from huggingface_hub import create_commit
+            from huggingface_hub._commit_api import CommitOperationAdd
+
+        model_index = hf_hub_download(repo_id=self.ckpt_id, filename="model_index.json")
+        with open(model_index, "r") as f:
+            pipeline_class_name = json.load(f)["_class_name"]
+        pipeline_class = getattr(import_module("diffusers"), pipeline_class_name)
+        self.logger.info(f"Pipeline class imported: {pipeline_class_name}.")
+
+        # Load the appropriate pipeline. We could have use `DiffusionPipeline`
+        # here, but just to avoid any rough edge cases.
+        pipeline = pipeline_class.from_pretrained(
+            self.ckpt_id, torch_dtype=torch.float16 if self.fp16 else torch.float32
+        )
+        pipeline.save_pretrained(
+            self.local_ckpt_dir,
+            safe_serialization=True if self.use_safetensors else False,
+            variant="fp16" if self.fp16 else None,
+        )
+        self.logger.info(f"Pipeline locally saved to {self.local_ckpt_dir}.")
+
+        # Fetch all the paths.
+        if self.fp16:
+            modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.fp16.*")
+        elif self.use_safetensors:
+            modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.safetensors")
+
+        # Prepare for the PR.
+        commit_message = f"Serialize variables with FP16: {self.fp16} and safetensors: {self.use_safetensors}."
+        operations = []
+        for path in modified_paths:
+            operations.append(CommitOperationAdd(path_in_repo="/".join(path.split("/")[4:]), path_or_fileobj=path))
+
+        # Open the PR.
+        commit_description = (
+            "Variables converted by the [`diffusers`' `fp16_safetensors`"
+            " CLI](https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/fp16_safetensors.py)."
+        )
+        hub_pr_url = create_commit(
+            repo_id=self.ckpt_id,
+            operations=operations,
+            commit_message=commit_message,
+            commit_description=commit_description,
+            repo_type="model",
+            create_pr=True,
+        ).pr_url
+        self.logger.info(f"PR created here: {hub_pr_url}.")
ADDED
@@ -0,0 +1,699 @@
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
+
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
""" ConfigMixin base class and utilities."""
|
17 |
+
import dataclasses
|
18 |
+
import functools
|
19 |
+
import importlib
|
20 |
+
import inspect
|
21 |
+
import json
|
22 |
+
import os
|
23 |
+
import re
|
24 |
+
from collections import OrderedDict
|
25 |
+
from pathlib import PosixPath
|
26 |
+
from typing import Any, Dict, Tuple, Union
|
27 |
+
|
28 |
+
import numpy as np
|
29 |
+
from huggingface_hub import create_repo, hf_hub_download
|
30 |
+
from huggingface_hub.utils import (
|
31 |
+
EntryNotFoundError,
|
32 |
+
RepositoryNotFoundError,
|
33 |
+
RevisionNotFoundError,
|
34 |
+
validate_hf_hub_args,
|
35 |
+
)
|
36 |
+
from requests import HTTPError
|
37 |
+
|
38 |
+
from . import __version__
|
39 |
+
from .utils import (
|
40 |
+
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
|
41 |
+
DummyObject,
|
42 |
+
deprecate,
|
43 |
+
extract_commit_hash,
|
44 |
+
http_user_agent,
|
45 |
+
logging,
|
46 |
+
)
|
47 |
+
|
48 |
+
|
49 |
+
logger = logging.get_logger(__name__)
|
50 |
+
|
51 |
+
_re_configuration_file = re.compile(r"config\.(.*)\.json")
|
52 |
+
|
53 |
+
|
54 |
+
class FrozenDict(OrderedDict):
|
55 |
+
def __init__(self, *args, **kwargs):
|
56 |
+
super().__init__(*args, **kwargs)
|
57 |
+
|
58 |
+
for key, value in self.items():
|
59 |
+
setattr(self, key, value)
|
60 |
+
|
61 |
+
self.__frozen = True
|
62 |
+
|
63 |
+
def __delitem__(self, *args, **kwargs):
|
64 |
+
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
|
65 |
+
|
66 |
+
def setdefault(self, *args, **kwargs):
|
67 |
+
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
|
68 |
+
|
69 |
+
def pop(self, *args, **kwargs):
|
70 |
+
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
|
71 |
+
|
72 |
+
def update(self, *args, **kwargs):
|
73 |
+
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
|
74 |
+
|
75 |
+
def __setattr__(self, name, value):
|
76 |
+
if hasattr(self, "__frozen") and self.__frozen:
|
77 |
+
raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.")
|
78 |
+
super().__setattr__(name, value)
|
79 |
+
|
80 |
+
def __setitem__(self, name, value):
|
81 |
+
if hasattr(self, "__frozen") and self.__frozen:
|
82 |
+
raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.")
|
83 |
+
super().__setitem__(name, value)
|
84 |
+
|
85 |
+
|
86 |
+
class ConfigMixin:
|
87 |
+
r"""
|
88 |
+
Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also
|
89 |
+
provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and
|
90 |
+
saving classes that inherit from [`ConfigMixin`].
|
91 |
+
|
92 |
+
Class attributes:
|
93 |
+
- **config_name** (`str`) -- A filename under which the config should stored when calling
|
94 |
+
[`~ConfigMixin.save_config`] (should be overridden by parent class).
|
95 |
+
- **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be
|
96 |
+
overridden by subclass).
|
97 |
+
- **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).
|
98 |
+
- **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function
|
99 |
+
should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by
|
100 |
+
subclass).
|
101 |
+
"""
|
102 |
+
|
103 |
+
config_name = None
|
104 |
+
ignore_for_config = []
|
105 |
+
has_compatibles = False
|
106 |
+
|
107 |
+
_deprecated_kwargs = []
|
108 |
+
|
109 |
+
def register_to_config(self, **kwargs):
|
110 |
+
if self.config_name is None:
|
111 |
+
raise NotImplementedError(f"Make sure that {self.__class__} has defined a class name `config_name`")
|
112 |
+
# Special case for `kwargs` used in deprecation warning added to schedulers
|
113 |
+
# TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,
|
114 |
+
# or solve in a more general way.
|
115 |
+
kwargs.pop("kwargs", None)
|
116 |
+
|
117 |
+
if not hasattr(self, "_internal_dict"):
|
118 |
+
internal_dict = kwargs
|
119 |
+
else:
|
120 |
+
previous_dict = dict(self._internal_dict)
|
121 |
+
internal_dict = {**self._internal_dict, **kwargs}
|
122 |
+
logger.debug(f"Updating config from {previous_dict} to {internal_dict}")
|
123 |
+
|
124 |
+
self._internal_dict = FrozenDict(internal_dict)
|
125 |
+
|
126 |
+
def __getattr__(self, name: str) -> Any:
|
127 |
+
"""The only reason we overwrite `getattr` here is to gracefully deprecate accessing
|
128 |
+
config attributes directly. See https://github.com/huggingface/diffusers/pull/3129
|
129 |
+
|
130 |
+
Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:
|
131 |
+
https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module
|
132 |
+
"""
|
133 |
+
|
134 |
+
is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name)
|
135 |
+
is_attribute = name in self.__dict__
|
136 |
+
|
137 |
+
if is_in_config and not is_attribute:
|
138 |
+
deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'."
|
139 |
+
deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False)
|
140 |
+
return self._internal_dict[name]
|
141 |
+
|
142 |
+
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
|
143 |
+
|
144 |
+
def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
|
145 |
+
"""
|
146 |
+
Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the
|
147 |
+
[`~ConfigMixin.from_config`] class method.
|
148 |
+
|
149 |
+
Args:
|
150 |
+
save_directory (`str` or `os.PathLike`):
|
151 |
+
Directory where the configuration JSON file is saved (will be created if it does not exist).
|
152 |
+
push_to_hub (`bool`, *optional*, defaults to `False`):
|
153 |
+
Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the
|
154 |
+
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
|
155 |
+
namespace).
|
156 |
+
kwargs (`Dict[str, Any]`, *optional*):
|
157 |
+
Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
|
158 |
+
"""
|
159 |
+
if os.path.isfile(save_directory):
|
160 |
+
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
|
161 |
+
|
162 |
+
os.makedirs(save_directory, exist_ok=True)
|
163 |
+
|
164 |
+
# If we save using the predefined names, we can load using `from_config`
|
165 |
+
output_config_file = os.path.join(save_directory, self.config_name)
|
166 |
+
|
167 |
+
self.to_json_file(output_config_file)
|
168 |
+
logger.info(f"Configuration saved in {output_config_file}")
|
169 |
+
|
170 |
+
if push_to_hub:
|
171 |
+
commit_message = kwargs.pop("commit_message", None)
|
172 |
+
private = kwargs.pop("private", False)
|
173 |
+
create_pr = kwargs.pop("create_pr", False)
|
174 |
+
token = kwargs.pop("token", None)
|
175 |
+
repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
|
176 |
+
repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id
|
177 |
+
|
178 |
+
self._upload_folder(
|
179 |
+
save_directory,
|
180 |
+
repo_id,
|
181 |
+
token=token,
|
182 |
+
commit_message=commit_message,
|
183 |
+
create_pr=create_pr,
|
184 |
+
)
|
185 |
+
|
186 |
+
@classmethod
|
187 |
+
def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):
|
188 |
+
r"""
|
189 |
+
Instantiate a Python class from a config dictionary.
|
190 |
+
|
191 |
+
Parameters:
|
192 |
+
config (`Dict[str, Any]`):
|
193 |
+
A config dictionary from which the Python class is instantiated. Make sure to only load configuration
|
194 |
+
files of compatible classes.
|
195 |
+
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
|
196 |
+
Whether kwargs that are not consumed by the Python class should be returned or not.
|
197 |
+
kwargs (remaining dictionary of keyword arguments, *optional*):
|
198 |
+
Can be used to update the configuration object (after it is loaded) and initiate the Python class.
|
199 |
+
`**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually
|
200 |
+
overwrite the same named arguments in `config`.
|
201 |
+
|
202 |
+
Returns:
|
203 |
+
[`ModelMixin`] or [`SchedulerMixin`]:
|
204 |
+
A model or scheduler object instantiated from a config dictionary.
|
205 |
+
|
206 |
+
Examples:
|
207 |
+
|
208 |
+
```python
|
209 |
+
>>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler
|
210 |
+
|
211 |
+
>>> # Download scheduler from huggingface.co and cache.
|
212 |
+
>>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cifar10-32")
|
213 |
+
|
214 |
+
>>> # Instantiate DDIM scheduler class with same config as DDPM
|
215 |
+
>>> scheduler = DDIMScheduler.from_config(scheduler.config)
|
216 |
+
|
217 |
+
>>> # Instantiate PNDM scheduler class with same config as DDPM
|
218 |
+
>>> scheduler = PNDMScheduler.from_config(scheduler.config)
|
219 |
+
```
|
220 |
+
"""
|
221 |
+
# <===== TO BE REMOVED WITH DEPRECATION
|
222 |
+
# TODO(Patrick) - make sure to remove the following lines when config=="model_path" is deprecated
|
223 |
+
if "pretrained_model_name_or_path" in kwargs:
|
224 |
+
config = kwargs.pop("pretrained_model_name_or_path")
|
225 |
+
|
226 |
+
if config is None:
|
227 |
+
raise ValueError("Please make sure to provide a config as the first positional argument.")
|
228 |
+
# ======>
|
229 |
+
|
230 |
+
if not isinstance(config, dict):
|
231 |
+
deprecation_message = "It is deprecated to pass a pretrained model name or path to `from_config`."
|
232 |
+
if "Scheduler" in cls.__name__:
|
233 |
+
deprecation_message += (
|
234 |
+
f"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead."
|
235 |
+
" Otherwise, please make sure to pass a configuration dictionary instead. This functionality will"
|
236 |
+
" be removed in v1.0.0."
|
237 |
+
)
|
238 |
+
elif "Model" in cls.__name__:
|
239 |
+
deprecation_message += (
|
240 |
+
f"If you were trying to load a model, please use {cls}.load_config(...) followed by"
|
241 |
+
f" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary"
|
242 |
+
" instead. This functionality will be removed in v1.0.0."
|
243 |
+
)
|
244 |
+
deprecate("config-passed-as-path", "1.0.0", deprecation_message, standard_warn=False)
|
245 |
+
config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)
|
246 |
+
|
247 |
+
init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)
|
248 |
+
|
249 |
+
# Allow dtype to be specified on initialization
|
250 |
+
if "dtype" in unused_kwargs:
|
251 |
+
init_dict["dtype"] = unused_kwargs.pop("dtype")
|
252 |
+
|
253 |
+
# add possible deprecated kwargs
|
254 |
+
for deprecated_kwarg in cls._deprecated_kwargs:
|
255 |
+
if deprecated_kwarg in unused_kwargs:
|
256 |
+
init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)
|
257 |
+
|
258 |
+
# Return model and optionally state and/or unused_kwargs
|
259 |
+
model = cls(**init_dict)
|
260 |
+
|
261 |
+
# make sure to also save config parameters that might be used for compatible classes
|
262 |
+
model.register_to_config(**hidden_dict)
|
263 |
+
|
264 |
+
# add hidden kwargs of compatible classes to unused_kwargs
|
265 |
+
unused_kwargs = {**unused_kwargs, **hidden_dict}
|
266 |
+
|
267 |
+
if return_unused_kwargs:
|
268 |
+
return (model, unused_kwargs)
|
269 |
+
else:
|
270 |
+
return model
|
271 |
+
|
272 |
+
@classmethod
|
273 |
+
def get_config_dict(cls, *args, **kwargs):
|
274 |
+
deprecation_message = (
|
275 |
+
f" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be"
|
276 |
+
" removed in version v1.0.0"
|
277 |
+
)
|
278 |
+
deprecate("get_config_dict", "1.0.0", deprecation_message, standard_warn=False)
|
279 |
+
return cls.load_config(*args, **kwargs)
|
280 |
+
|
281 |
+
@classmethod
|
282 |
+
@validate_hf_hub_args
|
283 |
+
def load_config(
|
284 |
+
cls,
|
285 |
+
pretrained_model_name_or_path: Union[str, os.PathLike],
|
286 |
+
return_unused_kwargs=False,
|
287 |
+
return_commit_hash=False,
|
288 |
+
**kwargs,
|
289 |
+
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
|
290 |
+
r"""
|
291 |
+
Load a model or scheduler configuration.
|
292 |
+
|
293 |
+
Parameters:
|
294 |
+
pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
|
295 |
+
Can be either:
|
296 |
+
|
297 |
+
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
|
298 |
+
the Hub.
|
299 |
+
- A path to a *directory* (for example `./my_model_directory`) containing model weights saved with
|
300 |
+
[`~ConfigMixin.save_config`].
|
301 |
+
|
302 |
+
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
303 |
+
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
|
304 |
+
is not used.
|
305 |
+
force_download (`bool`, *optional*, defaults to `False`):
|
306 |
+
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
307 |
+
cached versions if they exist.
|
308 |
+
resume_download (`bool`, *optional*, defaults to `False`):
|
309 |
+
Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
|
310 |
+
incompletely downloaded files are deleted.
|
311 |
+
proxies (`Dict[str, str]`, *optional*):
|
312 |
+
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
313 |
+
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
314 |
+
output_loading_info(`bool`, *optional*, defaults to `False`):
|
315 |
+
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
|
316 |
+
local_files_only (`bool`, *optional*, defaults to `False`):
|
317 |
+
Whether to only load local model weights and configuration files or not. If set to `True`, the model
|
318 |
+
won't be downloaded from the Hub.
|
319 |
+
token (`str` or *bool*, *optional*):
|
320 |
+
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
|
321 |
+
`diffusers-cli login` (stored in `~/.huggingface`) is used.
|
322 |
+
revision (`str`, *optional*, defaults to `"main"`):
|
323 |
+
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
|
324 |
+
allowed by Git.
|
325 |
+
subfolder (`str`, *optional*, defaults to `""`):
|
326 |
+
The subfolder location of a model file within a larger model repository on the Hub or locally.
|
327 |
+
return_unused_kwargs (`bool`, *optional*, defaults to `False):
|
328 |
+
Whether unused keyword arguments of the config are returned.
|
329 |
+
return_commit_hash (`bool`, *optional*, defaults to `False):
|
330 |
+
Whether the `commit_hash` of the loaded configuration are returned.
|
331 |
+
|
332 |
+
Returns:
|
333 |
+
`dict`:
|
334 |
+
A dictionary of all the parameters stored in a JSON configuration file.
|
335 |
+
|
336 |
+
"""
|
337 |
+
cache_dir = kwargs.pop("cache_dir", None)
|
338 |
+
force_download = kwargs.pop("force_download", False)
|
339 |
+
resume_download = kwargs.pop("resume_download", False)
|
340 |
+
proxies = kwargs.pop("proxies", None)
|
341 |
+
token = kwargs.pop("token", None)
|
342 |
+
local_files_only = kwargs.pop("local_files_only", False)
|
343 |
+
revision = kwargs.pop("revision", None)
|
344 |
+
_ = kwargs.pop("mirror", None)
|
345 |
+
subfolder = kwargs.pop("subfolder", None)
|
346 |
+
user_agent = kwargs.pop("user_agent", {})
|
347 |
+
|
348 |
+
user_agent = {**user_agent, "file_type": "config"}
|
349 |
+
user_agent = http_user_agent(user_agent)
|
350 |
+
|
351 |
+
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
|
352 |
+
|
353 |
+
if cls.config_name is None:
|
354 |
+
raise ValueError(
|
355 |
+
"`self.config_name` is not defined. Note that one should not load a config from "
|
356 |
+
"`ConfigMixin`. Please make sure to define `config_name` in a class inheriting from `ConfigMixin`"
|
357 |
+
)
|
358 |
+
|
359 |
+
if os.path.isfile(pretrained_model_name_or_path):
|
360 |
+
config_file = pretrained_model_name_or_path
|
361 |
+
elif os.path.isdir(pretrained_model_name_or_path):
|
362 |
+
if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):
|
363 |
+
# Load from a PyTorch checkpoint
|
364 |
+
config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)
|
365 |
+
elif subfolder is not None and os.path.isfile(
|
366 |
+
os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)
|
367 |
+
):
|
368 |
+
config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)
|
369 |
+
else:
|
370 |
+
raise EnvironmentError(
|
371 |
+
f"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}."
|
372 |
+
)
|
373 |
+
else:
|
374 |
+
try:
|
375 |
+
# Load from URL or cache if already cached
|
376 |
+
config_file = hf_hub_download(
|
377 |
+
pretrained_model_name_or_path,
|
378 |
+
filename=cls.config_name,
|
379 |
+
cache_dir=cache_dir,
|
380 |
+
force_download=force_download,
|
381 |
+
proxies=proxies,
|
382 |
+
resume_download=resume_download,
|
383 |
+
local_files_only=local_files_only,
|
384 |
+
token=token,
|
385 |
+
user_agent=user_agent,
|
386 |
+
subfolder=subfolder,
|
387 |
+
revision=revision,
|
388 |
+
)
|
389 |
+
except RepositoryNotFoundError:
|
390 |
+
raise EnvironmentError(
|
391 |
+
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
|
392 |
+
" listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
|
393 |
+
" token having permission to this repo with `token` or log in with `huggingface-cli login`."
|
394 |
+
)
|
395 |
+
except RevisionNotFoundError:
|
396 |
+
raise EnvironmentError(
|
397 |
+
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for"
|
398 |
+
" this model name. Check the model page at"
|
399 |
+
f" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
|
400 |
+
)
|
401 |
+
except EntryNotFoundError:
|
402 |
+
raise EnvironmentError(
|
403 |
+
f"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}."
|
404 |
+
)
|
405 |
+
except HTTPError as err:
|
406 |
+
raise EnvironmentError(
|
407 |
+
"There was a specific connection error when trying to load"
|
408 |
+
f" {pretrained_model_name_or_path}:\n{err}"
|
409 |
+
)
|
410 |
+
except ValueError:
|
411 |
+
raise EnvironmentError(
|
412 |
+
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
|
413 |
+
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
|
414 |
+
f" directory containing a {cls.config_name} file.\nCheckout your internet connection or see how to"
|
415 |
+
" run the library in offline mode at"
|
416 |
+
" 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
|
417 |
+
)
|
418 |
+
except EnvironmentError:
|
419 |
+
raise EnvironmentError(
|
420 |
+
f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from "
|
421 |
+
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
|
422 |
+
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
|
423 |
+
f"containing a {cls.config_name} file"
|
424 |
+
)
|
425 |
+
|
426 |
+
try:
|
427 |
+
# Load config dict
|
428 |
+
config_dict = cls._dict_from_json_file(config_file)
|
429 |
+
|
430 |
+
commit_hash = extract_commit_hash(config_file)
|
431 |
+
except (json.JSONDecodeError, UnicodeDecodeError):
|
432 |
+
raise EnvironmentError(f"It looks like the config file at '{config_file}' is not a valid JSON file.")
|
433 |
+
|
434 |
+
if not (return_unused_kwargs or return_commit_hash):
|
435 |
+
return config_dict
|
436 |
+
|
437 |
+
outputs = (config_dict,)
|
438 |
+
|
439 |
+
if return_unused_kwargs:
|
440 |
+
outputs += (kwargs,)
|
441 |
+
|
442 |
+
if return_commit_hash:
|
443 |
+
outputs += (commit_hash,)
|
444 |
+
|
445 |
+
return outputs
|
446 |
+
|
447 |
+
@staticmethod
|
448 |
+
def _get_init_keys(cls):
|
449 |
+
return set(dict(inspect.signature(cls.__init__).parameters).keys())
|
450 |
+
|
451 |
+
@classmethod
|
452 |
+
def extract_init_dict(cls, config_dict, **kwargs):
|
453 |
+
# Skip keys that were not present in the original config, so default __init__ values were used
|
454 |
+
used_defaults = config_dict.get("_use_default_values", [])
|
455 |
+
config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != "_use_default_values"}
|
456 |
+
|
457 |
+
# 0. Copy origin config dict
|
458 |
+
original_dict = dict(config_dict.items())
|
459 |
+
|
460 |
+
# 1. Retrieve expected config attributes from __init__ signature
|
461 |
+
expected_keys = cls._get_init_keys(cls)
|
462 |
+
expected_keys.remove("self")
|
463 |
+
# remove general kwargs if present in dict
|
464 |
+
if "kwargs" in expected_keys:
|
465 |
+
expected_keys.remove("kwargs")
|
466 |
+
# remove flax internal keys
|
467 |
+
if hasattr(cls, "_flax_internal_args"):
|
468 |
+
for arg in cls._flax_internal_args:
|
469 |
+
expected_keys.remove(arg)
|
470 |
+
|
471 |
+
# 2. Remove attributes that cannot be expected from expected config attributes
|
472 |
+
# remove keys to be ignored
|
473 |
+
if len(cls.ignore_for_config) > 0:
|
474 |
+
expected_keys = expected_keys - set(cls.ignore_for_config)
|
475 |
+
|
476 |
+
# load diffusers library to import compatible and original scheduler
|
477 |
+
diffusers_library = importlib.import_module(__name__.split(".")[0])
|
478 |
+
|
479 |
+
if cls.has_compatibles:
|
480 |
+
compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]
|
481 |
+
else:
|
482 |
+
compatible_classes = []
|
483 |
+
|
484 |
+
expected_keys_comp_cls = set()
|
485 |
+
for c in compatible_classes:
|
486 |
+
expected_keys_c = cls._get_init_keys(c)
|
487 |
+
expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)
|
488 |
+
expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)
|
489 |
+
config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}
|
490 |
+
|
491 |
+
# remove attributes from orig class that cannot be expected
|
492 |
+
orig_cls_name = config_dict.pop("_class_name", cls.__name__)
|
493 |
+
if (
|
494 |
+
isinstance(orig_cls_name, str)
|
495 |
+
and orig_cls_name != cls.__name__
|
496 |
+
and hasattr(diffusers_library, orig_cls_name)
|
497 |
+
):
|
498 |
+
orig_cls = getattr(diffusers_library, orig_cls_name)
|
499 |
+
unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys
|
500 |
+
config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}
|
501 |
+
elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):
|
502 |
+
raise ValueError(
|
503 |
+
"Make sure that the `_class_name` is of type string or list of string (for custom pipelines)."
|
504 |
+
)
|
505 |
+
|
506 |
+
# remove private attributes
|
507 |
+
config_dict = {k: v for k, v in config_dict.items() if not k.startswith("_")}
|
508 |
+
|
509 |
+
# 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments
|
510 |
+
init_dict = {}
|
511 |
+
for key in expected_keys:
|
512 |
+
# if config param is passed to kwarg and is present in config dict
|
513 |
+
# it should overwrite existing config dict key
|
514 |
+
if key in kwargs and key in config_dict:
|
515 |
+
config_dict[key] = kwargs.pop(key)
|
516 |
+
|
517 |
+
if key in kwargs:
|
518 |
+
# overwrite key
|
519 |
+
init_dict[key] = kwargs.pop(key)
|
520 |
+
elif key in config_dict:
|
521 |
+
# use value from config dict
|
522 |
+
init_dict[key] = config_dict.pop(key)
|
523 |
+
|
524 |
+
# 4. Give nice warning if unexpected values have been passed
|
525 |
+
if len(config_dict) > 0:
|
526 |
+
logger.warning(
|
527 |
+
f"The config attributes {config_dict} were passed to {cls.__name__}, "
|
528 |
+
"but are not expected and will be ignored. Please verify your "
|
529 |
+
f"{cls.config_name} configuration file."
|
530 |
+
)
|
531 |
+
|
532 |
+
# 5. Give nice info if config attributes are initiliazed to default because they have not been passed
|
533 |
+
passed_keys = set(init_dict.keys())
|
534 |
+
if len(expected_keys - passed_keys) > 0:
|
535 |
+
logger.info(
|
536 |
+
f"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values."
|
537 |
+
)
|
538 |
+
|
539 |
+
# 6. Define unused keyword arguments
|
540 |
+
unused_kwargs = {**config_dict, **kwargs}
|
541 |
+
|
542 |
+
# 7. Define "hidden" config parameters that were saved for compatible classes
|
543 |
+
hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}
|
544 |
+
|
545 |
+
return init_dict, unused_kwargs, hidden_config_dict
|
546 |
+
|
547 |
+
@classmethod
|
548 |
+
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
|
549 |
+
with open(json_file, "r", encoding="utf-8") as reader:
|
550 |
+
text = reader.read()
|
551 |
+
return json.loads(text)
|
552 |
+
|
553 |
+
def __repr__(self):
|
554 |
+
return f"{self.__class__.__name__} {self.to_json_string()}"
|
555 |
+
|
556 |
+
@property
|
557 |
+
def config(self) -> Dict[str, Any]:
|
558 |
+
"""
|
559 |
+
Returns the config of the class as a frozen dictionary
|
560 |
+
|
561 |
+
Returns:
|
562 |
+
`Dict[str, Any]`: Config of the class.
|
563 |
+
"""
|
564 |
+
return self._internal_dict
|
565 |
+
|
566 |
+
def to_json_string(self) -> str:
|
567 |
+
"""
|
568 |
+
Serializes the configuration instance to a JSON string.
|
569 |
+
|
570 |
+
Returns:
|
571 |
+
`str`:
|
572 |
+
String containing all the attributes that make up the configuration instance in JSON format.
|
573 |
+
"""
|
574 |
+
config_dict = self._internal_dict if hasattr(self, "_internal_dict") else {}
|
575 |
+
config_dict["_class_name"] = self.__class__.__name__
|
576 |
+
config_dict["_diffusers_version"] = __version__
|
577 |
+
|
578 |
+
def to_json_saveable(value):
|
579 |
+
if isinstance(value, np.ndarray):
|
580 |
+
value = value.tolist()
|
581 |
+
elif isinstance(value, PosixPath):
|
582 |
+
value = str(value)
|
583 |
+
return value
|
584 |
+
|
585 |
+
config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}
|
586 |
+
# Don't save "_ignore_files" or "_use_default_values"
|
587 |
+
config_dict.pop("_ignore_files", None)
|
588 |
+
config_dict.pop("_use_default_values", None)
|
589 |
+
|
590 |
+
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
|
591 |
+
|
592 |
+
def to_json_file(self, json_file_path: Union[str, os.PathLike]):
|
593 |
+
"""
|
594 |
+
Save the configuration instance's parameters to a JSON file.
|
595 |
+
|
596 |
+
Args:
|
597 |
+
json_file_path (`str` or `os.PathLike`):
|
598 |
+
Path to the JSON file to save a configuration instance's parameters.
|
599 |
+
"""
|
600 |
+
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())


def register_to_config(init):
    r"""
    Decorator to apply on the init of classes inheriting from [`ConfigMixin`] so that all the arguments are
    automatically sent to `self.register_for_config`. To ignore a specific argument accepted by the init but that
    shouldn't be registered in the config, use the `ignore_for_config` class variable

    Warning: Once decorated, all private arguments (beginning with an underscore) are trashed and not sent to the init!
    """

    @functools.wraps(init)
    def inner_init(self, *args, **kwargs):
        # Ignore private kwargs in the init.
        init_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
        config_init_kwargs = {k: v for k, v in kwargs.items() if k.startswith("_")}
        if not isinstance(self, ConfigMixin):
            raise RuntimeError(
                f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does "
                "not inherit from `ConfigMixin`."
            )

        ignore = getattr(self, "ignore_for_config", [])
        # Get positional arguments aligned with kwargs
        new_kwargs = {}
        signature = inspect.signature(init)
        parameters = {
            name: p.default for i, (name, p) in enumerate(signature.parameters.items()) if i > 0 and name not in ignore
        }
        for arg, name in zip(args, parameters.keys()):
            new_kwargs[name] = arg

        # Then add all kwargs
        new_kwargs.update(
            {
                k: init_kwargs.get(k, default)
                for k, default in parameters.items()
                if k not in ignore and k not in new_kwargs
            }
        )

        # Take note of the parameters that were not present in the loaded config
        if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0:
            new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs))

        new_kwargs = {**config_init_kwargs, **new_kwargs}
        getattr(self, "register_to_config")(**new_kwargs)
        init(self, *args, **init_kwargs)

    return inner_init


def flax_register_to_config(cls):
    original_init = cls.__init__

    @functools.wraps(original_init)
    def init(self, *args, **kwargs):
        if not isinstance(self, ConfigMixin):
            raise RuntimeError(
                f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does "
                "not inherit from `ConfigMixin`."
            )

        # Ignore private kwargs in the init. Retrieve all passed attributes
        init_kwargs = dict(kwargs.items())

        # Retrieve default values
        fields = dataclasses.fields(self)
        default_kwargs = {}
        for field in fields:
            # ignore flax specific attributes
            if field.name in self._flax_internal_args:
                continue
            if type(field.default) == dataclasses._MISSING_TYPE:
                default_kwargs[field.name] = None
            else:
                default_kwargs[field.name] = getattr(self, field.name)

        # Make sure init_kwargs override default kwargs
        new_kwargs = {**default_kwargs, **init_kwargs}
        # dtype should be part of `init_kwargs`, but not `new_kwargs`
        if "dtype" in new_kwargs:
            new_kwargs.pop("dtype")

        # Get positional arguments aligned with kwargs
        for i, arg in enumerate(args):
            name = fields[i].name
            new_kwargs[name] = arg

        # Take note of the parameters that were not present in the loaded config
        if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0:
            new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs))

        getattr(self, "register_to_config")(**new_kwargs)
        original_init(self, *args, **kwargs)

    cls.__init__ = init
    return cls
diffusers/dependency_versions_check.py
ADDED
@@ -0,0 +1,34 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python requests filelock numpy".split()
for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
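
A quick usage sketch for the check above (illustrative only, not part of this commit); it assumes the Space's bundled copy is importable as the top-level `diffusers` package:

```python
# Illustrative sketch: invoking the runtime dependency check manually.
from diffusers.dependency_versions_check import dep_version_check

# Raises if the installed package does not satisfy the pin recorded in
# dependency_versions_table.deps (e.g. "torch>=1.4"); `hint` is an optional
# extra message appended to the error.
dep_version_check("torch", hint="The bundled diffusers copy needs a recent torch.")
```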
diffusers/dependency_versions_table.py
ADDED
@@ -0,0 +1,46 @@
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.20.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark>=0.2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.4.1",
    "jaxlib": "jaxlib>=0.4.1",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "peft": "peft>=0.6.0",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ruff": "ruff==0.1.5",
    "safetensors": "safetensors>=0.3.1",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "GitPython": "GitPython<3.1.19",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
diffusers/experimental/README.md
ADDED
@@ -0,0 +1,5 @@
# 🧨 Diffusers Experimental

We are adding experimental code to support novel applications and usages of the Diffusers library.
Currently, the following experiments are supported:
* Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model.
diffusers/experimental/__init__.py
ADDED
@@ -0,0 +1 @@
from .rl import ValueGuidedRLPipeline
diffusers/experimental/rl/__init__.py
ADDED
@@ -0,0 +1 @@
from .value_guided_sampling import ValueGuidedRLPipeline
diffusers/experimental/rl/value_guided_sampling.py
ADDED
@@ -0,0 +1,153 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils.dummy_pt_objects import DDPMScheduler
from ...utils.torch_utils import randn_tensor


class ValueGuidedRLPipeline(DiffusionPipeline):
    r"""
    Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Parameters:
        value_function ([`UNet1DModel`]):
            A specialized UNet for fine-tuning trajectories base on reward.
        unet ([`UNet1DModel`]):
            UNet architecture to denoise the encoded trajectories.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this
            application is [`DDPMScheduler`].
        env ():
            An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models.
    """

    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()

        self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env)

        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if isinstance(x_in, dict):
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
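
For reviewers unfamiliar with this experimental pipeline, a hedged sketch of how it is typically driven follows (not part of this commit). The gym environment id and the pretrained checkpoint name are placeholders borrowed from the Diffuser locomotion example and may need verification; `d4rl`/`gym` are not dependencies of this Space.

```python
# Illustrative sketch only: driving ValueGuidedRLPipeline with a D4RL-style env.
import d4rl  # noqa: F401  (registers the offline-RL environments; placeholder dependency)
import gym

from diffusers.experimental import ValueGuidedRLPipeline

env = gym.make("hopper-medium-v2")  # placeholder env id
pipeline = ValueGuidedRLPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-hor32",  # placeholder checkpoint
    env=env,
)

obs = env.reset()
for _ in range(10):
    # plan a trajectory, return the first (denormalized) action of the best one
    action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)
    obs, reward, done, info = env.step(action)
    if done:
        break
```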
diffusers/image_processor.py
ADDED
@@ -0,0 +1,884 @@
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import warnings
|
16 |
+
from typing import List, Optional, Tuple, Union
|
17 |
+
|
18 |
+
import numpy as np
|
19 |
+
import PIL.Image
|
20 |
+
import torch
|
21 |
+
from PIL import Image, ImageFilter, ImageOps
|
22 |
+
|
23 |
+
from .configuration_utils import ConfigMixin, register_to_config
|
24 |
+
from .utils import CONFIG_NAME, PIL_INTERPOLATION, deprecate
|
25 |
+
|
26 |
+
|
27 |
+
PipelineImageInput = Union[
|
28 |
+
PIL.Image.Image,
|
29 |
+
np.ndarray,
|
30 |
+
torch.FloatTensor,
|
31 |
+
List[PIL.Image.Image],
|
32 |
+
List[np.ndarray],
|
33 |
+
List[torch.FloatTensor],
|
34 |
+
]
|
35 |
+
|
36 |
+
PipelineDepthInput = PipelineImageInput
|
37 |
+
|
38 |
+
|
39 |
+
class VaeImageProcessor(ConfigMixin):
|
40 |
+
"""
|
41 |
+
Image processor for VAE.
|
42 |
+
|
43 |
+
Args:
|
44 |
+
do_resize (`bool`, *optional*, defaults to `True`):
|
45 |
+
Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept
|
46 |
+
`height` and `width` arguments from [`image_processor.VaeImageProcessor.preprocess`] method.
|
47 |
+
vae_scale_factor (`int`, *optional*, defaults to `8`):
|
48 |
+
VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
|
49 |
+
resample (`str`, *optional*, defaults to `lanczos`):
|
50 |
+
Resampling filter to use when resizing the image.
|
51 |
+
do_normalize (`bool`, *optional*, defaults to `True`):
|
52 |
+
Whether to normalize the image to [-1,1].
|
53 |
+
do_binarize (`bool`, *optional*, defaults to `False`):
|
54 |
+
Whether to binarize the image to 0/1.
|
55 |
+
do_convert_rgb (`bool`, *optional*, defaults to be `False`):
|
56 |
+
Whether to convert the images to RGB format.
|
57 |
+
do_convert_grayscale (`bool`, *optional*, defaults to be `False`):
|
58 |
+
Whether to convert the images to grayscale format.
|
59 |
+
"""
|
60 |
+
|
61 |
+
config_name = CONFIG_NAME
|
62 |
+
|
63 |
+
@register_to_config
|
64 |
+
def __init__(
|
65 |
+
self,
|
66 |
+
do_resize: bool = True,
|
67 |
+
vae_scale_factor: int = 8,
|
68 |
+
resample: str = "lanczos",
|
69 |
+
do_normalize: bool = True,
|
70 |
+
do_binarize: bool = False,
|
71 |
+
do_convert_rgb: bool = False,
|
72 |
+
do_convert_grayscale: bool = False,
|
73 |
+
):
|
74 |
+
super().__init__()
|
75 |
+
if do_convert_rgb and do_convert_grayscale:
|
76 |
+
raise ValueError(
|
77 |
+
"`do_convert_rgb` and `do_convert_grayscale` can not both be set to `True`,"
|
78 |
+
" if you intended to convert the image into RGB format, please set `do_convert_grayscale = False`.",
|
79 |
+
" if you intended to convert the image into grayscale format, please set `do_convert_rgb = False`",
|
80 |
+
)
|
81 |
+
self.config.do_convert_rgb = False
|
82 |
+
|
83 |
+
@staticmethod
|
84 |
+
def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]:
|
85 |
+
"""
|
86 |
+
Convert a numpy image or a batch of images to a PIL image.
|
87 |
+
"""
|
88 |
+
if images.ndim == 3:
|
89 |
+
images = images[None, ...]
|
90 |
+
images = (images * 255).round().astype("uint8")
|
91 |
+
if images.shape[-1] == 1:
|
92 |
+
# special case for grayscale (single channel) images
|
93 |
+
pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
|
94 |
+
else:
|
95 |
+
pil_images = [Image.fromarray(image) for image in images]
|
96 |
+
|
97 |
+
return pil_images
|
98 |
+
|
99 |
+
@staticmethod
|
100 |
+
def pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray:
|
101 |
+
"""
|
102 |
+
Convert a PIL image or a list of PIL images to NumPy arrays.
|
103 |
+
"""
|
104 |
+
if not isinstance(images, list):
|
105 |
+
images = [images]
|
106 |
+
images = [np.array(image).astype(np.float32) / 255.0 for image in images]
|
107 |
+
images = np.stack(images, axis=0)
|
108 |
+
|
109 |
+
return images
|
110 |
+
|
111 |
+
@staticmethod
|
112 |
+
def numpy_to_pt(images: np.ndarray) -> torch.FloatTensor:
|
113 |
+
"""
|
114 |
+
Convert a NumPy image to a PyTorch tensor.
|
115 |
+
"""
|
116 |
+
if images.ndim == 3:
|
117 |
+
images = images[..., None]
|
118 |
+
|
119 |
+
images = torch.from_numpy(images.transpose(0, 3, 1, 2))
|
120 |
+
return images
|
121 |
+
|
122 |
+
@staticmethod
|
123 |
+
def pt_to_numpy(images: torch.FloatTensor) -> np.ndarray:
|
124 |
+
"""
|
125 |
+
Convert a PyTorch tensor to a NumPy image.
|
126 |
+
"""
|
127 |
+
images = images.cpu().permute(0, 2, 3, 1).float().numpy()
|
128 |
+
return images
|
129 |
+
|
130 |
+
@staticmethod
|
131 |
+
def normalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
|
132 |
+
"""
|
133 |
+
Normalize an image array to [-1,1].
|
134 |
+
"""
|
135 |
+
return 2.0 * images - 1.0
|
136 |
+
|
137 |
+
@staticmethod
|
138 |
+
def denormalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
|
139 |
+
"""
|
140 |
+
Denormalize an image array to [0,1].
|
141 |
+
"""
|
142 |
+
return (images / 2 + 0.5).clamp(0, 1)
|
143 |
+
|
144 |
+
@staticmethod
|
145 |
+
def convert_to_rgb(image: PIL.Image.Image) -> PIL.Image.Image:
|
146 |
+
"""
|
147 |
+
Converts a PIL image to RGB format.
|
148 |
+
"""
|
149 |
+
image = image.convert("RGB")
|
150 |
+
|
151 |
+
return image
|
152 |
+
|
153 |
+
@staticmethod
|
154 |
+
def convert_to_grayscale(image: PIL.Image.Image) -> PIL.Image.Image:
|
155 |
+
"""
|
156 |
+
Converts a PIL image to grayscale format.
|
157 |
+
"""
|
158 |
+
image = image.convert("L")
|
159 |
+
|
160 |
+
return image
|
161 |
+
|
162 |
+
@staticmethod
|
163 |
+
def blur(image: PIL.Image.Image, blur_factor: int = 4) -> PIL.Image.Image:
|
164 |
+
"""
|
165 |
+
Applies Gaussian blur to an image.
|
166 |
+
"""
|
167 |
+
image = image.filter(ImageFilter.GaussianBlur(blur_factor))
|
168 |
+
|
169 |
+
return image
|
170 |
+
|
171 |
+
@staticmethod
|
172 |
+
def get_crop_region(mask_image: PIL.Image.Image, width: int, height: int, pad=0):
|
173 |
+
"""
|
174 |
+
Finds a rectangular region that contains all masked ares in an image, and expands region to match the aspect ratio of the original image;
|
175 |
+
for example, if user drew mask in a 128x32 region, and the dimensions for processing are 512x512, the region will be expanded to 128x128.
|
176 |
+
|
177 |
+
Args:
|
178 |
+
mask_image (PIL.Image.Image): Mask image.
|
179 |
+
width (int): Width of the image to be processed.
|
180 |
+
height (int): Height of the image to be processed.
|
181 |
+
pad (int, optional): Padding to be added to the crop region. Defaults to 0.
|
182 |
+
|
183 |
+
Returns:
|
184 |
+
tuple: (x1, y1, x2, y2) represent a rectangular region that contains all masked ares in an image and matches the original aspect ratio.
|
185 |
+
"""
|
186 |
+
|
187 |
+
mask_image = mask_image.convert("L")
|
188 |
+
mask = np.array(mask_image)
|
189 |
+
|
190 |
+
# 1. find a rectangular region that contains all masked ares in an image
|
191 |
+
h, w = mask.shape
|
192 |
+
crop_left = 0
|
193 |
+
for i in range(w):
|
194 |
+
if not (mask[:, i] == 0).all():
|
195 |
+
break
|
196 |
+
crop_left += 1
|
197 |
+
|
198 |
+
crop_right = 0
|
199 |
+
for i in reversed(range(w)):
|
200 |
+
if not (mask[:, i] == 0).all():
|
201 |
+
break
|
202 |
+
crop_right += 1
|
203 |
+
|
204 |
+
crop_top = 0
|
205 |
+
for i in range(h):
|
206 |
+
if not (mask[i] == 0).all():
|
207 |
+
break
|
208 |
+
crop_top += 1
|
209 |
+
|
210 |
+
crop_bottom = 0
|
211 |
+
for i in reversed(range(h)):
|
212 |
+
if not (mask[i] == 0).all():
|
213 |
+
break
|
214 |
+
crop_bottom += 1
|
215 |
+
|
216 |
+
# 2. add padding to the crop region
|
217 |
+
x1, y1, x2, y2 = (
|
218 |
+
int(max(crop_left - pad, 0)),
|
219 |
+
int(max(crop_top - pad, 0)),
|
220 |
+
int(min(w - crop_right + pad, w)),
|
221 |
+
int(min(h - crop_bottom + pad, h)),
|
222 |
+
)
|
223 |
+
|
224 |
+
# 3. expands crop region to match the aspect ratio of the image to be processed
|
225 |
+
ratio_crop_region = (x2 - x1) / (y2 - y1)
|
226 |
+
ratio_processing = width / height
|
227 |
+
|
228 |
+
if ratio_crop_region > ratio_processing:
|
229 |
+
desired_height = (x2 - x1) / ratio_processing
|
230 |
+
desired_height_diff = int(desired_height - (y2 - y1))
|
231 |
+
y1 -= desired_height_diff // 2
|
232 |
+
y2 += desired_height_diff - desired_height_diff // 2
|
233 |
+
if y2 >= mask_image.height:
|
234 |
+
diff = y2 - mask_image.height
|
235 |
+
y2 -= diff
|
236 |
+
y1 -= diff
|
237 |
+
if y1 < 0:
|
238 |
+
y2 -= y1
|
239 |
+
y1 -= y1
|
240 |
+
if y2 >= mask_image.height:
|
241 |
+
y2 = mask_image.height
|
242 |
+
else:
|
243 |
+
desired_width = (y2 - y1) * ratio_processing
|
244 |
+
desired_width_diff = int(desired_width - (x2 - x1))
|
245 |
+
x1 -= desired_width_diff // 2
|
246 |
+
x2 += desired_width_diff - desired_width_diff // 2
|
247 |
+
if x2 >= mask_image.width:
|
248 |
+
diff = x2 - mask_image.width
|
249 |
+
x2 -= diff
|
250 |
+
x1 -= diff
|
251 |
+
if x1 < 0:
|
252 |
+
x2 -= x1
|
253 |
+
x1 -= x1
|
254 |
+
if x2 >= mask_image.width:
|
255 |
+
x2 = mask_image.width
|
256 |
+
|
257 |
+
return x1, y1, x2, y2
|
258 |
+
|
259 |
+
def _resize_and_fill(
|
260 |
+
self,
|
261 |
+
image: PIL.Image.Image,
|
262 |
+
width: int,
|
263 |
+
height: int,
|
264 |
+
) -> PIL.Image.Image:
|
265 |
+
"""
|
266 |
+
Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling empty with data from image.
|
267 |
+
|
268 |
+
Args:
|
269 |
+
image: The image to resize.
|
270 |
+
width: The width to resize the image to.
|
271 |
+
height: The height to resize the image to.
|
272 |
+
"""
|
273 |
+
|
274 |
+
ratio = width / height
|
275 |
+
src_ratio = image.width / image.height
|
276 |
+
|
277 |
+
src_w = width if ratio < src_ratio else image.width * height // image.height
|
278 |
+
src_h = height if ratio >= src_ratio else image.height * width // image.width
|
279 |
+
|
280 |
+
resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"])
|
281 |
+
res = Image.new("RGB", (width, height))
|
282 |
+
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
|
283 |
+
|
284 |
+
if ratio < src_ratio:
|
285 |
+
fill_height = height // 2 - src_h // 2
|
286 |
+
if fill_height > 0:
|
287 |
+
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
|
288 |
+
res.paste(
|
289 |
+
resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)),
|
290 |
+
box=(0, fill_height + src_h),
|
291 |
+
)
|
292 |
+
elif ratio > src_ratio:
|
293 |
+
fill_width = width // 2 - src_w // 2
|
294 |
+
if fill_width > 0:
|
295 |
+
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
|
296 |
+
res.paste(
|
297 |
+
resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)),
|
298 |
+
box=(fill_width + src_w, 0),
|
299 |
+
)
|
300 |
+
|
301 |
+
return res
|
302 |
+
|
303 |
+
def _resize_and_crop(
|
304 |
+
self,
|
305 |
+
image: PIL.Image.Image,
|
306 |
+
width: int,
|
307 |
+
height: int,
|
308 |
+
) -> PIL.Image.Image:
|
309 |
+
"""
|
310 |
+
Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
|
311 |
+
|
312 |
+
Args:
|
313 |
+
image: The image to resize.
|
314 |
+
width: The width to resize the image to.
|
315 |
+
height: The height to resize the image to.
|
316 |
+
"""
|
317 |
+
ratio = width / height
|
318 |
+
src_ratio = image.width / image.height
|
319 |
+
|
320 |
+
src_w = width if ratio > src_ratio else image.width * height // image.height
|
321 |
+
src_h = height if ratio <= src_ratio else image.height * width // image.width
|
322 |
+
|
323 |
+
resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"])
|
324 |
+
res = Image.new("RGB", (width, height))
|
325 |
+
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
|
326 |
+
return res
|
327 |
+
|
328 |
+
def resize(
|
329 |
+
self,
|
330 |
+
image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
|
331 |
+
height: int,
|
332 |
+
width: int,
|
333 |
+
resize_mode: str = "default", # "defalt", "fill", "crop"
|
334 |
+
) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]:
|
335 |
+
"""
|
336 |
+
Resize image.
|
337 |
+
|
338 |
+
Args:
|
339 |
+
image (`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`):
|
340 |
+
The image input, can be a PIL image, numpy array or pytorch tensor.
|
341 |
+
height (`int`):
|
342 |
+
The height to resize to.
|
343 |
+
width (`int`):
|
344 |
+
The width to resize to.
|
345 |
+
resize_mode (`str`, *optional*, defaults to `default`):
|
346 |
+
The resize mode to use, can be one of `default` or `fill`. If `default`, will resize the image to fit
|
347 |
+
within the specified width and height, and it may not maintaining the original aspect ratio.
|
348 |
+
If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
|
349 |
+
within the dimensions, filling empty with data from image.
|
350 |
+
If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
|
351 |
+
within the dimensions, cropping the excess.
|
352 |
+
Note that resize_mode `fill` and `crop` are only supported for PIL image input.
|
353 |
+
|
354 |
+
Returns:
|
355 |
+
`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`:
|
356 |
+
The resized image.
|
357 |
+
"""
|
358 |
+
if resize_mode != "default" and not isinstance(image, PIL.Image.Image):
|
359 |
+
raise ValueError(f"Only PIL image input is supported for resize_mode {resize_mode}")
|
360 |
+
if isinstance(image, PIL.Image.Image):
|
361 |
+
if resize_mode == "default":
|
362 |
+
image = image.resize((width, height), resample=PIL_INTERPOLATION[self.config.resample])
|
363 |
+
elif resize_mode == "fill":
|
364 |
+
image = self._resize_and_fill(image, width, height)
|
365 |
+
elif resize_mode == "crop":
|
366 |
+
image = self._resize_and_crop(image, width, height)
|
367 |
+
else:
|
368 |
+
raise ValueError(f"resize_mode {resize_mode} is not supported")
|
369 |
+
|
370 |
+
elif isinstance(image, torch.Tensor):
|
371 |
+
image = torch.nn.functional.interpolate(
|
372 |
+
image,
|
373 |
+
size=(height, width),
|
374 |
+
)
|
375 |
+
elif isinstance(image, np.ndarray):
|
376 |
+
image = self.numpy_to_pt(image)
|
377 |
+
image = torch.nn.functional.interpolate(
|
378 |
+
image,
|
379 |
+
size=(height, width),
|
380 |
+
)
|
381 |
+
image = self.pt_to_numpy(image)
|
382 |
+
return image
|
383 |
+
|
384 |
+
def binarize(self, image: PIL.Image.Image) -> PIL.Image.Image:
|
385 |
+
"""
|
386 |
+
Create a mask.
|
387 |
+
|
388 |
+
Args:
|
389 |
+
image (`PIL.Image.Image`):
|
390 |
+
The image input, should be a PIL image.
|
391 |
+
|
392 |
+
Returns:
|
393 |
+
`PIL.Image.Image`:
|
394 |
+
The binarized image. Values less than 0.5 are set to 0, values greater than 0.5 are set to 1.
|
395 |
+
"""
|
396 |
+
image[image < 0.5] = 0
|
397 |
+
image[image >= 0.5] = 1
|
398 |
+
|
399 |
+
return image
|
400 |
+
|
401 |
+
def get_default_height_width(
|
402 |
+
self,
|
403 |
+
image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
|
404 |
+
height: Optional[int] = None,
|
405 |
+
width: Optional[int] = None,
|
406 |
+
) -> Tuple[int, int]:
|
407 |
+
"""
|
408 |
+
This function return the height and width that are downscaled to the next integer multiple of
|
409 |
+
`vae_scale_factor`.
|
410 |
+
|
411 |
+
Args:
|
412 |
+
image(`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`):
|
413 |
+
The image input, can be a PIL image, numpy array or pytorch tensor. if it is a numpy array, should have
|
414 |
+
shape `[batch, height, width]` or `[batch, height, width, channel]` if it is a pytorch tensor, should
|
415 |
+
have shape `[batch, channel, height, width]`.
|
416 |
+
height (`int`, *optional*, defaults to `None`):
|
417 |
+
The height in preprocessed image. If `None`, will use the height of `image` input.
|
418 |
+
width (`int`, *optional*`, defaults to `None`):
|
419 |
+
The width in preprocessed. If `None`, will use the width of the `image` input.
|
420 |
+
"""
|
421 |
+
|
422 |
+
if height is None:
|
423 |
+
if isinstance(image, PIL.Image.Image):
|
424 |
+
height = image.height
|
425 |
+
elif isinstance(image, torch.Tensor):
|
426 |
+
height = image.shape[2]
|
427 |
+
else:
|
428 |
+
height = image.shape[1]
|
429 |
+
|
430 |
+
if width is None:
|
431 |
+
if isinstance(image, PIL.Image.Image):
|
432 |
+
width = image.width
|
433 |
+
elif isinstance(image, torch.Tensor):
|
434 |
+
width = image.shape[3]
|
435 |
+
else:
|
436 |
+
width = image.shape[2]
|
437 |
+
|
438 |
+
width, height = (
|
439 |
+
x - x % self.config.vae_scale_factor for x in (width, height)
|
440 |
+
) # resize to integer multiple of vae_scale_factor
|
441 |
+
|
442 |
+
return height, width
|
443 |
+
|
444 |
+
def preprocess(
|
445 |
+
self,
|
446 |
+
image: PipelineImageInput,
|
447 |
+
height: Optional[int] = None,
|
448 |
+
width: Optional[int] = None,
|
449 |
+
resize_mode: str = "default", # "defalt", "fill", "crop"
|
450 |
+
crops_coords: Optional[Tuple[int, int, int, int]] = None,
|
451 |
+
) -> torch.Tensor:
|
452 |
+
"""
|
453 |
+
Preprocess the image input.
|
454 |
+
|
455 |
+
Args:
|
456 |
+
image (`pipeline_image_input`):
|
457 |
+
The image input, accepted formats are PIL images, NumPy arrays, PyTorch tensors; Also accept list of supported formats.
|
458 |
+
height (`int`, *optional*, defaults to `None`):
|
459 |
+
The height in preprocessed image. If `None`, will use the `get_default_height_width()` to get default height.
|
460 |
+
width (`int`, *optional*`, defaults to `None`):
|
461 |
+
The width in preprocessed. If `None`, will use get_default_height_width()` to get the default width.
|
462 |
+
resize_mode (`str`, *optional*, defaults to `default`):
|
463 |
+
The resize mode, can be one of `default` or `fill`. If `default`, will resize the image to fit
|
464 |
+
within the specified width and height, and it may not maintaining the original aspect ratio.
|
465 |
+
If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
|
466 |
+
within the dimensions, filling empty with data from image.
|
467 |
+
If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
|
468 |
+
within the dimensions, cropping the excess.
|
469 |
+
Note that resize_mode `fill` and `crop` are only supported for PIL image input.
|
470 |
+
crops_coords (`List[Tuple[int, int, int, int]]`, *optional*, defaults to `None`):
|
471 |
+
The crop coordinates for each image in the batch. If `None`, will not crop the image.
|
472 |
+
"""
|
473 |
+
supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor)
|
474 |
+
|
475 |
+
# Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image
|
476 |
+
if self.config.do_convert_grayscale and isinstance(image, (torch.Tensor, np.ndarray)) and image.ndim == 3:
|
477 |
+
if isinstance(image, torch.Tensor):
|
478 |
+
# if image is a pytorch tensor could have 2 possible shapes:
|
479 |
+
# 1. batch x height x width: we should insert the channel dimension at position 1
|
480 |
+
# 2. channnel x height x width: we should insert batch dimension at position 0,
|
481 |
+
# however, since both channel and batch dimension has same size 1, it is same to insert at position 1
|
482 |
+
# for simplicity, we insert a dimension of size 1 at position 1 for both cases
|
483 |
+
image = image.unsqueeze(1)
|
484 |
+
else:
|
485 |
+
# if it is a numpy array, it could have 2 possible shapes:
|
486 |
+
# 1. batch x height x width: insert channel dimension on last position
|
487 |
+
# 2. height x width x channel: insert batch dimension on first position
|
488 |
+
if image.shape[-1] == 1:
|
489 |
+
image = np.expand_dims(image, axis=0)
|
490 |
+
else:
|
491 |
+
image = np.expand_dims(image, axis=-1)
|
492 |
+
|
493 |
+
if isinstance(image, supported_formats):
|
494 |
+
image = [image]
|
495 |
+
elif not (isinstance(image, list) and all(isinstance(i, supported_formats) for i in image)):
|
496 |
+
raise ValueError(
|
497 |
+
f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support {', '.join(supported_formats)}"
|
498 |
+
)
|
499 |
+
|
500 |
+
if isinstance(image[0], PIL.Image.Image):
|
501 |
+
if crops_coords is not None:
|
502 |
+
image = [i.crop(crops_coords) for i in image]
|
503 |
+
if self.config.do_resize:
|
504 |
+
height, width = self.get_default_height_width(image[0], height, width)
|
505 |
+
image = [self.resize(i, height, width, resize_mode=resize_mode) for i in image]
|
506 |
+
if self.config.do_convert_rgb:
|
507 |
+
image = [self.convert_to_rgb(i) for i in image]
|
508 |
+
elif self.config.do_convert_grayscale:
|
509 |
+
image = [self.convert_to_grayscale(i) for i in image]
|
510 |
+
image = self.pil_to_numpy(image) # to np
|
511 |
+
image = self.numpy_to_pt(image) # to pt
|
512 |
+
|
513 |
+
elif isinstance(image[0], np.ndarray):
|
514 |
+
image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
|
515 |
+
|
516 |
+
image = self.numpy_to_pt(image)
|
517 |
+
|
518 |
+
height, width = self.get_default_height_width(image, height, width)
|
519 |
+
if self.config.do_resize:
|
520 |
+
image = self.resize(image, height, width)
|
521 |
+
|
522 |
+
elif isinstance(image[0], torch.Tensor):
|
523 |
+
image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
|
524 |
+
|
525 |
+
if self.config.do_convert_grayscale and image.ndim == 3:
|
526 |
+
image = image.unsqueeze(1)
|
527 |
+
|
528 |
+
channel = image.shape[1]
|
529 |
+
# don't need any preprocess if the image is latents
|
530 |
+
if channel == 4:
|
531 |
+
return image
|
532 |
+
|
533 |
+
height, width = self.get_default_height_width(image, height, width)
|
534 |
+
if self.config.do_resize:
|
535 |
+
image = self.resize(image, height, width)
|
536 |
+
|
537 |
+
# expected range [0,1], normalize to [-1,1]
|
538 |
+
do_normalize = self.config.do_normalize
|
539 |
+
if do_normalize and image.min() < 0:
|
540 |
+
warnings.warn(
|
541 |
+
"Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] "
|
542 |
+
f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{image.min()},{image.max()}]",
|
543 |
+
FutureWarning,
|
544 |
+
)
|
545 |
+
do_normalize = False
|
546 |
+
|
547 |
+
if do_normalize:
|
548 |
+
image = self.normalize(image)
|
549 |
+
|
550 |
+
if self.config.do_binarize:
|
551 |
+
image = self.binarize(image)
|
552 |
+
|
553 |
+
return image
|
554 |
+
|
555 |
+
def postprocess(
|
556 |
+
self,
|
557 |
+
image: torch.FloatTensor,
|
558 |
+
output_type: str = "pil",
|
559 |
+
do_denormalize: Optional[List[bool]] = None,
|
560 |
+
) -> Union[PIL.Image.Image, np.ndarray, torch.FloatTensor]:
|
561 |
+
"""
|
562 |
+
Postprocess the image output from tensor to `output_type`.
|
563 |
+
|
564 |
+
Args:
|
565 |
+
image (`torch.FloatTensor`):
|
566 |
+
The image input, should be a pytorch tensor with shape `B x C x H x W`.
|
567 |
+
output_type (`str`, *optional*, defaults to `pil`):
|
568 |
+
The output type of the image, can be one of `pil`, `np`, `pt`, `latent`.
|
569 |
+
do_denormalize (`List[bool]`, *optional*, defaults to `None`):
|
570 |
+
Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the
|
571 |
+
`VaeImageProcessor` config.
|
572 |
+
|
573 |
+
Returns:
|
574 |
+
`PIL.Image.Image`, `np.ndarray` or `torch.FloatTensor`:
|
575 |
+
The postprocessed image.
|
576 |
+
"""
|
577 |
+
if not isinstance(image, torch.Tensor):
|
578 |
+
raise ValueError(
|
579 |
+
f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor"
|
580 |
+
)
|
581 |
+
if output_type not in ["latent", "pt", "np", "pil"]:
|
582 |
+
deprecation_message = (
|
583 |
+
f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: "
|
584 |
+
"`pil`, `np`, `pt`, `latent`"
|
585 |
+
)
|
586 |
+
deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
|
587 |
+
output_type = "np"
|
588 |
+
|
589 |
+
if output_type == "latent":
|
590 |
+
return image
|
591 |
+
|
592 |
+
if do_denormalize is None:
|
593 |
+
do_denormalize = [self.config.do_normalize] * image.shape[0]
|
594 |
+
|
595 |
+
image = torch.stack(
|
596 |
+
[self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])]
|
597 |
+
)
|
598 |
+
|
599 |
+
if output_type == "pt":
|
600 |
+
return image
|
601 |
+
|
602 |
+
image = self.pt_to_numpy(image)
|
603 |
+
|
604 |
+
if output_type == "np":
|
605 |
+
return image
|
606 |
+
|
607 |
+
if output_type == "pil":
|
608 |
+
return self.numpy_to_pil(image)
|
609 |
+
|
610 |
+
def apply_overlay(
|
611 |
+
self,
|
612 |
+
mask: PIL.Image.Image,
|
613 |
+
init_image: PIL.Image.Image,
|
614 |
+
image: PIL.Image.Image,
|
615 |
+
crop_coords: Optional[Tuple[int, int, int, int]] = None,
|
616 |
+
) -> PIL.Image.Image:
|
617 |
+
"""
|
618 |
+
overlay the inpaint output to the original image
|
619 |
+
"""
|
620 |
+
|
621 |
+
width, height = image.width, image.height
|
622 |
+
|
623 |
+
init_image = self.resize(init_image, width=width, height=height)
|
624 |
+
mask = self.resize(mask, width=width, height=height)
|
625 |
+
|
626 |
+
init_image_masked = PIL.Image.new("RGBa", (width, height))
|
627 |
+
init_image_masked.paste(init_image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(mask.convert("L")))
|
628 |
+
init_image_masked = init_image_masked.convert("RGBA")
|
629 |
+
|
630 |
+
if crop_coords is not None:
|
631 |
+
x, y, x2, y2 = crop_coords
|
632 |
+
w = x2 - x
|
633 |
+
h = y2 - y
|
634 |
+
base_image = PIL.Image.new("RGBA", (width, height))
|
635 |
+
image = self.resize(image, height=h, width=w, resize_mode="crop")
|
636 |
+
base_image.paste(image, (x, y))
|
637 |
+
image = base_image.convert("RGB")
|
638 |
+
|
639 |
+
image = image.convert("RGBA")
|
640 |
+
image.alpha_composite(init_image_masked)
|
641 |
+
image = image.convert("RGB")
|
642 |
+
|
643 |
+
return image
|
644 |
+
|
645 |
+
|
646 |
+
class VaeImageProcessorLDM3D(VaeImageProcessor):
|
647 |
+
"""
|
648 |
+
Image processor for VAE LDM3D.
|
649 |
+
|
650 |
+
Args:
|
651 |
+
do_resize (`bool`, *optional*, defaults to `True`):
|
652 |
+
Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`.
|
653 |
+
vae_scale_factor (`int`, *optional*, defaults to `8`):
|
654 |
+
VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
|
655 |
+
resample (`str`, *optional*, defaults to `lanczos`):
|
656 |
+
Resampling filter to use when resizing the image.
|
657 |
+
do_normalize (`bool`, *optional*, defaults to `True`):
|
658 |
+
Whether to normalize the image to [-1,1].
|
659 |
+
"""
|
660 |
+
|
661 |
+
config_name = CONFIG_NAME
|
662 |
+
|
663 |
+
@register_to_config
|
664 |
+
def __init__(
|
665 |
+
self,
|
666 |
+
do_resize: bool = True,
|
667 |
+
vae_scale_factor: int = 8,
|
668 |
+
resample: str = "lanczos",
|
669 |
+
do_normalize: bool = True,
|
670 |
+
):
|
671 |
+
super().__init__()
|
672 |
+
|
673 |
+
@staticmethod
|
674 |
+
def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]:
|
675 |
+
"""
|
676 |
+
Convert a NumPy image or a batch of images to a PIL image.
|
677 |
+
"""
|
678 |
+
if images.ndim == 3:
|
679 |
+
images = images[None, ...]
|
680 |
+
images = (images * 255).round().astype("uint8")
|
681 |
+
if images.shape[-1] == 1:
|
682 |
+
# special case for grayscale (single channel) images
|
683 |
+
pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
|
684 |
+
else:
|
685 |
+
pil_images = [Image.fromarray(image[:, :, :3]) for image in images]
|
686 |
+
|
687 |
+
return pil_images
|
688 |
+
|
689 |
+
@staticmethod
|
690 |
+
def depth_pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray:
|
691 |
+
"""
|
692 |
+
Convert a PIL image or a list of PIL images to NumPy arrays.
|
693 |
+
"""
|
694 |
+
if not isinstance(images, list):
|
695 |
+
images = [images]
|
696 |
+
|
697 |
+
images = [np.array(image).astype(np.float32) / (2**16 - 1) for image in images]
|
698 |
+
images = np.stack(images, axis=0)
|
699 |
+
return images
|
700 |
+
|
701 |
+
@staticmethod
|
702 |
+
def rgblike_to_depthmap(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
|
703 |
+
"""
|
704 |
+
Args:
|
705 |
+
image: RGB-like depth image
|
706 |
+
|
707 |
+
Returns: depth map
|
708 |
+
|
709 |
+
"""
|
710 |
+
return image[:, :, 1] * 2**8 + image[:, :, 2]
|
711 |
+
|
712 |
+
def numpy_to_depth(self, images: np.ndarray) -> List[PIL.Image.Image]:
|
713 |
+
"""
|
714 |
+
Convert a NumPy depth image or a batch of images to a PIL image.
|
715 |
+
"""
|
716 |
+
if images.ndim == 3:
|
717 |
+
images = images[None, ...]
|
718 |
+
images_depth = images[:, :, :, 3:]
|
719 |
+
if images.shape[-1] == 6:
|
720 |
+
images_depth = (images_depth * 255).round().astype("uint8")
|
721 |
+
pil_images = [
|
722 |
+
Image.fromarray(self.rgblike_to_depthmap(image_depth), mode="I;16") for image_depth in images_depth
|
723 |
+
]
|
724 |
+
elif images.shape[-1] == 4:
|
725 |
+
images_depth = (images_depth * 65535.0).astype(np.uint16)
|
726 |
+
pil_images = [Image.fromarray(image_depth, mode="I;16") for image_depth in images_depth]
|
727 |
+
else:
|
728 |
+
raise Exception("Not supported")
|
729 |
+
|
730 |
+
return pil_images
|
731 |
+
|
732 |
+
def postprocess(
|
733 |
+
self,
|
734 |
+
image: torch.FloatTensor,
|
735 |
+
output_type: str = "pil",
|
736 |
+
do_denormalize: Optional[List[bool]] = None,
|
737 |
+
) -> Union[PIL.Image.Image, np.ndarray, torch.FloatTensor]:
|
738 |
+
"""
|
739 |
+
Postprocess the image output from tensor to `output_type`.
|
740 |
+
|
741 |
+
Args:
|
742 |
+
image (`torch.FloatTensor`):
|
743 |
+
The image input, should be a pytorch tensor with shape `B x C x H x W`.
|
744 |
+
output_type (`str`, *optional*, defaults to `pil`):
|
745 |
+
The output type of the image, can be one of `pil`, `np`, `pt`, `latent`.
|
746 |
+
do_denormalize (`List[bool]`, *optional*, defaults to `None`):
|
747 |
+
Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the
|
748 |
+
`VaeImageProcessor` config.
|
749 |
+
|
750 |
+
Returns:
|
751 |
+
`PIL.Image.Image`, `np.ndarray` or `torch.FloatTensor`:
|
752 |
+
The postprocessed image.
|
753 |
+
"""
|
754 |
+
if not isinstance(image, torch.Tensor):
|
755 |
+
raise ValueError(
|
756 |
+
f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor"
|
757 |
+
)
|
758 |
+
if output_type not in ["latent", "pt", "np", "pil"]:
|
759 |
+
deprecation_message = (
|
760 |
+
f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: "
|
761 |
+
"`pil`, `np`, `pt`, `latent`"
|
762 |
+
)
|
763 |
+
deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
|
764 |
+
output_type = "np"
|
765 |
+
|
766 |
+
if do_denormalize is None:
|
767 |
+
do_denormalize = [self.config.do_normalize] * image.shape[0]
|
768 |
+
|
769 |
+
image = torch.stack(
|
770 |
+
[self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])]
|
771 |
+
)
|
772 |
+
|
773 |
+
image = self.pt_to_numpy(image)
|
774 |
+
|
775 |
+
if output_type == "np":
|
776 |
+
if image.shape[-1] == 6:
|
777 |
+
image_depth = np.stack([self.rgblike_to_depthmap(im[:, :, 3:]) for im in image], axis=0)
|
778 |
+
else:
|
779 |
+
image_depth = image[:, :, :, 3:]
|
780 |
+
return image[:, :, :, :3], image_depth
|
781 |
+
|
782 |
+
if output_type == "pil":
|
783 |
+
return self.numpy_to_pil(image), self.numpy_to_depth(image)
|
784 |
+
else:
|
785 |
+
raise Exception(f"This type {output_type} is not supported")
|
786 |
+
|
787 |
+
def preprocess(
|
788 |
+
self,
|
789 |
+
rgb: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray],
|
790 |
+
depth: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray],
|
791 |
+
height: Optional[int] = None,
|
792 |
+
width: Optional[int] = None,
|
793 |
+
target_res: Optional[int] = None,
|
794 |
+
) -> torch.Tensor:
|
795 |
+
"""
|
796 |
+
Preprocess the image input. Accepted formats are PIL images, NumPy arrays or PyTorch tensors.
|
797 |
+
"""
|
798 |
+
supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor)
|
799 |
+
|
800 |
+
# Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image
|
801 |
+
if self.config.do_convert_grayscale and isinstance(rgb, (torch.Tensor, np.ndarray)) and rgb.ndim == 3:
|
802 |
+
raise Exception("This is not yet supported")
|
803 |
+
|
804 |
+
if isinstance(rgb, supported_formats):
|
805 |
+
rgb = [rgb]
|
806 |
+
depth = [depth]
|
807 |
+
elif not (isinstance(rgb, list) and all(isinstance(i, supported_formats) for i in rgb)):
|
808 |
+
raise ValueError(
|
809 |
+
f"Input is in incorrect format: {[type(i) for i in rgb]}. Currently, we only support {', '.join(supported_formats)}"
|
810 |
+
)
|
811 |
+
|
812 |
+
if isinstance(rgb[0], PIL.Image.Image):
|
813 |
+
if self.config.do_convert_rgb:
|
814 |
+
raise Exception("This is not yet supported")
|
815 |
+
# rgb = [self.convert_to_rgb(i) for i in rgb]
|
816 |
+
# depth = [self.convert_to_depth(i) for i in depth] #TODO define convert_to_depth
|
817 |
+
if self.config.do_resize or target_res:
|
818 |
+
height, width = self.get_default_height_width(rgb[0], height, width) if not target_res else target_res
|
819 |
+
rgb = [self.resize(i, height, width) for i in rgb]
|
820 |
+
depth = [self.resize(i, height, width) for i in depth]
|
821 |
+
rgb = self.pil_to_numpy(rgb) # to np
|
822 |
+
rgb = self.numpy_to_pt(rgb) # to pt
|
823 |
+
|
824 |
+
depth = self.depth_pil_to_numpy(depth) # to np
|
825 |
+
depth = self.numpy_to_pt(depth) # to pt
|
826 |
+
|
827 |
+
elif isinstance(rgb[0], np.ndarray):
|
828 |
+
rgb = np.concatenate(rgb, axis=0) if rgb[0].ndim == 4 else np.stack(rgb, axis=0)
|
829 |
+
rgb = self.numpy_to_pt(rgb)
|
830 |
+
height, width = self.get_default_height_width(rgb, height, width)
|
831 |
+
if self.config.do_resize:
|
832 |
+
rgb = self.resize(rgb, height, width)
|
833 |
+
|
834 |
+
depth = np.concatenate(depth, axis=0) if rgb[0].ndim == 4 else np.stack(depth, axis=0)
|
835 |
+
depth = self.numpy_to_pt(depth)
|
836 |
+
height, width = self.get_default_height_width(depth, height, width)
|
837 |
+
if self.config.do_resize:
|
838 |
+
depth = self.resize(depth, height, width)
|
839 |
+
|
840 |
+
elif isinstance(rgb[0], torch.Tensor):
|
841 |
+
raise Exception("This is not yet supported")
|
842 |
+
# rgb = torch.cat(rgb, axis=0) if rgb[0].ndim == 4 else torch.stack(rgb, axis=0)
|
843 |
+
|
844 |
+
# if self.config.do_convert_grayscale and rgb.ndim == 3:
|
845 |
+
# rgb = rgb.unsqueeze(1)
|
846 |
+
|
847 |
+
# channel = rgb.shape[1]
|
848 |
+
|
849 |
+
# height, width = self.get_default_height_width(rgb, height, width)
|
850 |
+
# if self.config.do_resize:
|
851 |
+
# rgb = self.resize(rgb, height, width)
|
852 |
+
|
853 |
+
# depth = torch.cat(depth, axis=0) if depth[0].ndim == 4 else torch.stack(depth, axis=0)
|
854 |
+
|
855 |
+
# if self.config.do_convert_grayscale and depth.ndim == 3:
|
856 |
+
# depth = depth.unsqueeze(1)
|
857 |
+
|
858 |
+
# channel = depth.shape[1]
|
859 |
+
# # don't need any preprocess if the image is latents
|
860 |
+
# if depth == 4:
|
861 |
+
# return rgb, depth
|
862 |
+
|
863 |
+
# height, width = self.get_default_height_width(depth, height, width)
|
864 |
+
# if self.config.do_resize:
|
865 |
+
# depth = self.resize(depth, height, width)
|
866 |
+
# expected range [0,1], normalize to [-1,1]
|
867 |
+
do_normalize = self.config.do_normalize
|
868 |
+
if rgb.min() < 0 and do_normalize:
|
869 |
+
warnings.warn(
|
870 |
+
"Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] "
|
871 |
+
f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{rgb.min()},{rgb.max()}]",
|
872 |
+
FutureWarning,
|
873 |
+
)
|
874 |
+
do_normalize = False
|
875 |
+
|
876 |
+
if do_normalize:
|
877 |
+
rgb = self.normalize(rgb)
|
878 |
+
depth = self.normalize(depth)
|
879 |
+
|
880 |
+
if self.config.do_binarize:
|
881 |
+
rgb = self.binarize(rgb)
|
882 |
+
depth = self.binarize(depth)
|
883 |
+
|
884 |
+
return rgb, depth
|
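
The file above (`diffusers/image_processor.py`, added verbatim from upstream) centers on `VaeImageProcessor.preprocess`/`postprocess`, which pipelines use to convert between PIL/NumPy images and `[-1, 1]` tensors. A small round-trip sketch (illustrative only, not part of this commit):

```python
# Illustrative sketch: round-tripping an image through VaeImageProcessor.
import numpy as np
from PIL import Image

from diffusers.image_processor import VaeImageProcessor

processor = VaeImageProcessor(vae_scale_factor=8)

# Any RGB PIL image works; a random one keeps the sketch self-contained.
pil_image = Image.fromarray(np.random.randint(0, 256, (300, 200, 3), dtype=np.uint8))

# preprocess: PIL -> float tensor [1, 3, H', W'] in [-1, 1], with H'/W' snapped
# down to multiples of vae_scale_factor
batch = processor.preprocess(pil_image)
print(batch.shape, batch.min().item(), batch.max().item())

# postprocess: tensor in [-1, 1] -> list of PIL images (the default pipeline output)
images = processor.postprocess(batch, output_type="pil")
images[0].save("roundtrip.png")
```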
diffusers/loaders/__init__.py
ADDED
@@ -0,0 +1,86 @@
+from typing import TYPE_CHECKING
+
+from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
+from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
+
+
+def text_encoder_lora_state_dict(text_encoder):
+    deprecate(
+        "text_encoder_load_state_dict in `models`",
+        "0.27.0",
+        "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
+    )
+    state_dict = {}
+
+    for name, module in text_encoder_attn_modules(text_encoder):
+        for k, v in module.q_proj.lora_linear_layer.state_dict().items():
+            state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
+
+        for k, v in module.k_proj.lora_linear_layer.state_dict().items():
+            state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
+
+        for k, v in module.v_proj.lora_linear_layer.state_dict().items():
+            state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
+
+        for k, v in module.out_proj.lora_linear_layer.state_dict().items():
+            state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
+
+    return state_dict
+
+
+if is_transformers_available():
+
+    def text_encoder_attn_modules(text_encoder):
+        deprecate(
+            "text_encoder_attn_modules in `models`",
+            "0.27.0",
+            "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
+        )
+        from transformers import CLIPTextModel, CLIPTextModelWithProjection
+
+        attn_modules = []
+
+        if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
+            for i, layer in enumerate(text_encoder.text_model.encoder.layers):
+                name = f"text_model.encoder.layers.{i}.self_attn"
+                mod = layer.self_attn
+                attn_modules.append((name, mod))
+        else:
+            raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
+
+        return attn_modules
+
+
+_import_structure = {}
+
+if is_torch_available():
+    _import_structure["single_file"] = ["FromOriginalControlnetMixin", "FromOriginalVAEMixin"]
+    _import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
+    _import_structure["utils"] = ["AttnProcsLayers"]
+
+    if is_transformers_available():
+        _import_structure["single_file"].extend(["FromSingleFileMixin"])
+        _import_structure["lora"] = ["LoraLoaderMixin", "StableDiffusionXLLoraLoaderMixin"]
+        _import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
+        _import_structure["ip_adapter"] = ["IPAdapterMixin"]
+
+_import_structure["peft"] = ["PeftAdapterMixin"]
+
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    if is_torch_available():
+        from .single_file import FromOriginalControlnetMixin, FromOriginalVAEMixin
+        from .unet import UNet2DConditionLoadersMixin
+        from .utils import AttnProcsLayers
+
+        if is_transformers_available():
+            from .ip_adapter import IPAdapterMixin
+            from .lora import LoraLoaderMixin, StableDiffusionXLLoraLoaderMixin
+            from .single_file import FromSingleFileMixin
+            from .textual_inversion import TextualInversionLoaderMixin
+
+    from .peft import PeftAdapterMixin
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
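The `_LazyModule` indirection above defers the real submodule imports until one of the exported names is first accessed. Below is a minimal sketch of how a consumer composes the exported mixins; the pipeline class is hypothetical and only illustrates the intent, it is not part of this commit.

```python
# Minimal sketch, assuming a stock diffusers install: the names listed in
# _import_structure resolve lazily, and pipelines gain their loading methods
# by inheriting the mixins.
from diffusers import DiffusionPipeline
from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin


class HypotheticalPipeline(DiffusionPipeline, LoraLoaderMixin, TextualInversionLoaderMixin, IPAdapterMixin):
    # Inheriting the mixins adds load_lora_weights(), load_textual_inversion()
    # and load_ip_adapter(); they expect the usual unet / text_encoder /
    # tokenizer components to be registered on the pipeline.
    pass
```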
|
diffusers/loaders/ip_adapter.py
ADDED
@@ -0,0 +1,190 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+from typing import Dict, Union
+
+import torch
+from huggingface_hub.utils import validate_hf_hub_args
+from safetensors import safe_open
+
+from ..utils import (
+    _get_model_file,
+    is_transformers_available,
+    logging,
+)
+
+
+if is_transformers_available():
+    from transformers import (
+        CLIPImageProcessor,
+        CLIPVisionModelWithProjection,
+    )
+
+    from ..models.attention_processor import (
+        IPAdapterAttnProcessor,
+        IPAdapterAttnProcessor2_0,
+    )
+
+logger = logging.get_logger(__name__)
+
+
+class IPAdapterMixin:
+    """Mixin for handling IP Adapters."""
+
+    @validate_hf_hub_args
+    def load_ip_adapter(
+        self,
+        pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
+        subfolder: str,
+        weight_name: str,
+        **kwargs,
+    ):
+        """
+        Parameters:
+            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
+                Can be either:
+
+                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
+                      the Hub.
+                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
+                      with [`ModelMixin.save_pretrained`].
+                    - A [torch state
+                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
+
+            cache_dir (`Union[str, os.PathLike]`, *optional*):
+                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
+                is not used.
+            force_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+                cached versions if they exist.
+            resume_download (`bool`, *optional*, defaults to `False`):
+                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
+                incompletely downloaded files are deleted.
+            proxies (`Dict[str, str]`, *optional*):
+                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
+                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+            local_files_only (`bool`, *optional*, defaults to `False`):
+                Whether to only load local model weights and configuration files or not. If set to `True`, the model
+                won't be downloaded from the Hub.
+            token (`str` or *bool*, *optional*):
+                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
+                `diffusers-cli login` (stored in `~/.huggingface`) is used.
+            revision (`str`, *optional*, defaults to `"main"`):
+                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
+                allowed by Git.
+            subfolder (`str`, *optional*, defaults to `""`):
+                The subfolder location of a model file within a larger model repository on the Hub or locally.
+        """
+
+        # Load the main state dict first.
+        cache_dir = kwargs.pop("cache_dir", None)
+        force_download = kwargs.pop("force_download", False)
+        resume_download = kwargs.pop("resume_download", False)
+        proxies = kwargs.pop("proxies", None)
+        local_files_only = kwargs.pop("local_files_only", None)
+        token = kwargs.pop("token", None)
+        revision = kwargs.pop("revision", None)
+
+        user_agent = {
+            "file_type": "attn_procs_weights",
+            "framework": "pytorch",
+        }
+
+        if not isinstance(pretrained_model_name_or_path_or_dict, dict):
+            model_file = _get_model_file(
+                pretrained_model_name_or_path_or_dict,
+                weights_name=weight_name,
+                cache_dir=cache_dir,
+                force_download=force_download,
+                resume_download=resume_download,
+                proxies=proxies,
+                local_files_only=local_files_only,
+                token=token,
+                revision=revision,
+                subfolder=subfolder,
+                user_agent=user_agent,
+            )
+            if weight_name.endswith(".safetensors"):
+                state_dict = {"image_proj": {}, "ip_adapter": {}}
+                with safe_open(model_file, framework="pt", device="cpu") as f:
+                    for key in f.keys():
+                        if key.startswith("image_proj."):
+                            state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
+                        elif key.startswith("ip_adapter."):
+                            state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
+            else:
+                state_dict = torch.load(model_file, map_location="cpu")
+        else:
+            state_dict = pretrained_model_name_or_path_or_dict
+
+        keys = list(state_dict.keys())
+        if keys != ["image_proj", "ip_adapter"]:
+            raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
+
+        # load CLIP image encoder here if it has not been registered to the pipeline yet
+        if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
+            if not isinstance(pretrained_model_name_or_path_or_dict, dict):
+                logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
+                image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+                    pretrained_model_name_or_path_or_dict,
+                    subfolder=os.path.join(subfolder, "image_encoder"),
+                ).to(self.device, dtype=self.dtype)
+                self.image_encoder = image_encoder
+                self.register_to_config(image_encoder=["transformers", "CLIPVisionModelWithProjection"])
+            else:
+                raise ValueError("`image_encoder` cannot be None when using IP Adapters.")
+
+        # create feature extractor if it has not been registered to the pipeline yet
+        if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
+            self.feature_extractor = CLIPImageProcessor()
+            self.register_to_config(feature_extractor=["transformers", "CLIPImageProcessor"])
+
+        # load ip-adapter into unet
+        unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
+        unet._load_ip_adapter_weights(state_dict)
+
+    def set_ip_adapter_scale(self, scale):
+        unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
+        for attn_processor in unet.attn_processors.values():
+            if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
+                attn_processor.scale = scale
+
+    def unload_ip_adapter(self):
+        """
+        Unloads the IP Adapter weights
+
+        Examples:
+
+        ```python
+        >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
+        >>> pipeline.unload_ip_adapter()
+        >>> ...
+        ```
+        """
+        # remove CLIP image encoder
+        if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
+            self.image_encoder = None
+            self.register_to_config(image_encoder=[None, None])
+
+        # remove feature extractor
+        if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None:
+            self.feature_extractor = None
+            self.register_to_config(feature_extractor=[None, None])
+
+        # remove hidden encoder
+        self.unet.encoder_hid_proj = None
+        self.config.encoder_hid_dim_type = None
+
+        # restore original Unet attention processors layers
+        self.unet.set_default_attn_processor()
diffusers/loaders/lora.py
ADDED
@@ -0,0 +1,1553 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import inspect
+import os
+from contextlib import nullcontext
+from typing import Callable, Dict, List, Optional, Union
+
+import safetensors
+import torch
+from huggingface_hub import model_info
+from huggingface_hub.constants import HF_HUB_OFFLINE
+from huggingface_hub.utils import validate_hf_hub_args
+from packaging import version
+from torch import nn
+
+from .. import __version__
+from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
+from ..utils import (
+    USE_PEFT_BACKEND,
+    _get_model_file,
+    convert_state_dict_to_diffusers,
+    convert_state_dict_to_peft,
+    convert_unet_state_dict_to_peft,
+    delete_adapter_layers,
+    deprecate,
+    get_adapter_name,
+    get_peft_kwargs,
+    is_accelerate_available,
+    is_transformers_available,
+    logging,
+    recurse_remove_peft_layers,
+    scale_lora_layers,
+    set_adapter_layers,
+    set_weights_and_activate_adapters,
+)
+from .lora_conversion_utils import _convert_kohya_lora_to_diffusers, _maybe_map_sgm_blocks_to_diffusers
+
+
+if is_transformers_available():
+    from transformers import PreTrainedModel
+
+    from ..models.lora import PatchedLoraProjection, text_encoder_attn_modules, text_encoder_mlp_modules
+
+if is_accelerate_available():
+    from accelerate import init_empty_weights
+    from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
+
+logger = logging.get_logger(__name__)
+
+TEXT_ENCODER_NAME = "text_encoder"
+UNET_NAME = "unet"
+TRANSFORMER_NAME = "transformer"
+
+LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
+LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"
+
+LORA_DEPRECATION_MESSAGE = "You are using an old version of LoRA backend. This will be deprecated in the next releases in favor of PEFT make sure to install the latest PEFT and transformers packages in the future."
+
+
+class LoraLoaderMixin:
+    r"""
+    Load LoRA layers into [`UNet2DConditionModel`] and
+    [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel).
+    """
+
+    text_encoder_name = TEXT_ENCODER_NAME
+    unet_name = UNET_NAME
+    transformer_name = TRANSFORMER_NAME
+    num_fused_loras = 0
+
+    def load_lora_weights(
+        self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs
+    ):
+        """
+        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
+        `self.text_encoder`.
+
+        All kwargs are forwarded to `self.lora_state_dict`.
+
+        See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
+
+        See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
+        `self.unet`.
+
+        See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
+        into `self.text_encoder`.
+
+        Parameters:
+            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
+                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
+            kwargs (`dict`, *optional*):
+                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
+            adapter_name (`str`, *optional*):
+                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
+                `default_{i}` where i is the total number of adapters being loaded.
+        """
+        # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
+        state_dict, network_alphas = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs)
+
+        is_correct_format = all("lora" in key for key in state_dict.keys())
+        if not is_correct_format:
+            raise ValueError("Invalid LoRA checkpoint.")
+
+        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
+
+        self.load_lora_into_unet(
+            state_dict,
+            network_alphas=network_alphas,
+            unet=getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet,
+            low_cpu_mem_usage=low_cpu_mem_usage,
+            adapter_name=adapter_name,
+            _pipeline=self,
+        )
+        self.load_lora_into_text_encoder(
+            state_dict,
+            network_alphas=network_alphas,
+            text_encoder=getattr(self, self.text_encoder_name)
+            if not hasattr(self, "text_encoder")
+            else self.text_encoder,
+            lora_scale=self.lora_scale,
+            low_cpu_mem_usage=low_cpu_mem_usage,
+            adapter_name=adapter_name,
+            _pipeline=self,
+        )
+
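A minimal usage sketch for `load_lora_weights()` above. The LoRA repository id and adapter name are placeholders, and the weight file name simply matches the `LORA_WEIGHT_NAME_SAFE` default defined earlier; any pipeline inheriting `LoraLoaderMixin` exposes the same call.

```python
# Illustrative only: the LoRA repo id and adapter name below are placeholders.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Loads both the UNet and text-encoder LoRA layers from one checkpoint; extra
# kwargs such as weight_name are forwarded to lora_state_dict().
pipe.load_lora_weights(
    "some-user/some-lora", weight_name="pytorch_lora_weights.safetensors", adapter_name="style"
)

image = pipe("a prompt in the style the LoRA was trained for").images[0]
```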
137 |
+
@classmethod
|
138 |
+
@validate_hf_hub_args
|
139 |
+
def lora_state_dict(
|
140 |
+
cls,
|
141 |
+
pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
|
142 |
+
**kwargs,
|
143 |
+
):
|
144 |
+
r"""
|
145 |
+
Return state dict for lora weights and the network alphas.
|
146 |
+
|
147 |
+
<Tip warning={true}>
|
148 |
+
|
149 |
+
We support loading A1111 formatted LoRA checkpoints in a limited capacity.
|
150 |
+
|
151 |
+
This function is experimental and might change in the future.
|
152 |
+
|
153 |
+
</Tip>
|
154 |
+
|
155 |
+
Parameters:
|
156 |
+
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
|
157 |
+
Can be either:
|
158 |
+
|
159 |
+
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
|
160 |
+
the Hub.
|
161 |
+
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
|
162 |
+
with [`ModelMixin.save_pretrained`].
|
163 |
+
- A [torch state
|
164 |
+
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
|
165 |
+
|
166 |
+
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
167 |
+
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
|
168 |
+
is not used.
|
169 |
+
force_download (`bool`, *optional*, defaults to `False`):
|
170 |
+
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
171 |
+
cached versions if they exist.
|
172 |
+
resume_download (`bool`, *optional*, defaults to `False`):
|
173 |
+
Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
|
174 |
+
incompletely downloaded files are deleted.
|
175 |
+
proxies (`Dict[str, str]`, *optional*):
|
176 |
+
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
177 |
+
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
178 |
+
local_files_only (`bool`, *optional*, defaults to `False`):
|
179 |
+
Whether to only load local model weights and configuration files or not. If set to `True`, the model
|
180 |
+
won't be downloaded from the Hub.
|
181 |
+
token (`str` or *bool*, *optional*):
|
182 |
+
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
|
183 |
+
`diffusers-cli login` (stored in `~/.huggingface`) is used.
|
184 |
+
revision (`str`, *optional*, defaults to `"main"`):
|
185 |
+
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
|
186 |
+
allowed by Git.
|
187 |
+
subfolder (`str`, *optional*, defaults to `""`):
|
188 |
+
The subfolder location of a model file within a larger model repository on the Hub or locally.
|
189 |
+
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
|
190 |
+
Speed up model loading only loading the pretrained weights and not initializing the weights. This also
|
191 |
+
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
|
192 |
+
Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
|
193 |
+
argument to `True` will raise an error.
|
194 |
+
mirror (`str`, *optional*):
|
195 |
+
Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
|
196 |
+
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
|
197 |
+
information.
|
198 |
+
|
199 |
+
"""
|
200 |
+
# Load the main state dict first which has the LoRA layers for either of
|
201 |
+
# UNet and text encoder or both.
|
202 |
+
cache_dir = kwargs.pop("cache_dir", None)
|
203 |
+
force_download = kwargs.pop("force_download", False)
|
204 |
+
resume_download = kwargs.pop("resume_download", False)
|
205 |
+
proxies = kwargs.pop("proxies", None)
|
206 |
+
local_files_only = kwargs.pop("local_files_only", None)
|
207 |
+
token = kwargs.pop("token", None)
|
208 |
+
revision = kwargs.pop("revision", None)
|
209 |
+
subfolder = kwargs.pop("subfolder", None)
|
210 |
+
weight_name = kwargs.pop("weight_name", None)
|
211 |
+
unet_config = kwargs.pop("unet_config", None)
|
212 |
+
use_safetensors = kwargs.pop("use_safetensors", None)
|
213 |
+
|
214 |
+
allow_pickle = False
|
215 |
+
if use_safetensors is None:
|
216 |
+
use_safetensors = True
|
217 |
+
allow_pickle = True
|
218 |
+
|
219 |
+
user_agent = {
|
220 |
+
"file_type": "attn_procs_weights",
|
221 |
+
"framework": "pytorch",
|
222 |
+
}
|
223 |
+
|
224 |
+
model_file = None
|
225 |
+
if not isinstance(pretrained_model_name_or_path_or_dict, dict):
|
226 |
+
# Let's first try to load .safetensors weights
|
227 |
+
if (use_safetensors and weight_name is None) or (
|
228 |
+
weight_name is not None and weight_name.endswith(".safetensors")
|
229 |
+
):
|
230 |
+
try:
|
231 |
+
# Here we're relaxing the loading check to enable more Inference API
|
232 |
+
# friendliness where sometimes, it's not at all possible to automatically
|
233 |
+
# determine `weight_name`.
|
234 |
+
if weight_name is None:
|
235 |
+
weight_name = cls._best_guess_weight_name(
|
236 |
+
pretrained_model_name_or_path_or_dict,
|
237 |
+
file_extension=".safetensors",
|
238 |
+
local_files_only=local_files_only,
|
239 |
+
)
|
240 |
+
model_file = _get_model_file(
|
241 |
+
pretrained_model_name_or_path_or_dict,
|
242 |
+
weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
|
243 |
+
cache_dir=cache_dir,
|
244 |
+
force_download=force_download,
|
245 |
+
resume_download=resume_download,
|
246 |
+
proxies=proxies,
|
247 |
+
local_files_only=local_files_only,
|
248 |
+
token=token,
|
249 |
+
revision=revision,
|
250 |
+
subfolder=subfolder,
|
251 |
+
user_agent=user_agent,
|
252 |
+
)
|
253 |
+
state_dict = safetensors.torch.load_file(model_file, device="cpu")
|
254 |
+
except (IOError, safetensors.SafetensorError) as e:
|
255 |
+
if not allow_pickle:
|
256 |
+
raise e
|
257 |
+
# try loading non-safetensors weights
|
258 |
+
model_file = None
|
259 |
+
pass
|
260 |
+
|
261 |
+
if model_file is None:
|
262 |
+
if weight_name is None:
|
263 |
+
weight_name = cls._best_guess_weight_name(
|
264 |
+
pretrained_model_name_or_path_or_dict, file_extension=".bin", local_files_only=local_files_only
|
265 |
+
)
|
266 |
+
model_file = _get_model_file(
|
267 |
+
pretrained_model_name_or_path_or_dict,
|
268 |
+
weights_name=weight_name or LORA_WEIGHT_NAME,
|
269 |
+
cache_dir=cache_dir,
|
270 |
+
force_download=force_download,
|
271 |
+
resume_download=resume_download,
|
272 |
+
proxies=proxies,
|
273 |
+
local_files_only=local_files_only,
|
274 |
+
token=token,
|
275 |
+
revision=revision,
|
276 |
+
subfolder=subfolder,
|
277 |
+
user_agent=user_agent,
|
278 |
+
)
|
279 |
+
state_dict = torch.load(model_file, map_location="cpu")
|
280 |
+
else:
|
281 |
+
state_dict = pretrained_model_name_or_path_or_dict
|
282 |
+
|
283 |
+
network_alphas = None
|
284 |
+
# TODO: replace it with a method from `state_dict_utils`
|
285 |
+
if all(
|
286 |
+
(
|
287 |
+
k.startswith("lora_te_")
|
288 |
+
or k.startswith("lora_unet_")
|
289 |
+
or k.startswith("lora_te1_")
|
290 |
+
or k.startswith("lora_te2_")
|
291 |
+
)
|
292 |
+
for k in state_dict.keys()
|
293 |
+
):
|
294 |
+
# Map SDXL blocks correctly.
|
295 |
+
if unet_config is not None:
|
296 |
+
# use unet config to remap block numbers
|
297 |
+
state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config)
|
298 |
+
state_dict, network_alphas = _convert_kohya_lora_to_diffusers(state_dict)
|
299 |
+
|
300 |
+
return state_dict, network_alphas
|
301 |
+
|
302 |
+
@classmethod
|
303 |
+
def _best_guess_weight_name(
|
304 |
+
cls, pretrained_model_name_or_path_or_dict, file_extension=".safetensors", local_files_only=False
|
305 |
+
):
|
306 |
+
if local_files_only or HF_HUB_OFFLINE:
|
307 |
+
raise ValueError("When using the offline mode, you must specify a `weight_name`.")
|
308 |
+
|
309 |
+
targeted_files = []
|
310 |
+
|
311 |
+
if os.path.isfile(pretrained_model_name_or_path_or_dict):
|
312 |
+
return
|
313 |
+
elif os.path.isdir(pretrained_model_name_or_path_or_dict):
|
314 |
+
targeted_files = [
|
315 |
+
f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension)
|
316 |
+
]
|
317 |
+
else:
|
318 |
+
files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings
|
319 |
+
targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)]
|
320 |
+
if len(targeted_files) == 0:
|
321 |
+
return
|
322 |
+
|
323 |
+
# "scheduler" does not correspond to a LoRA checkpoint.
|
324 |
+
# "optimizer" does not correspond to a LoRA checkpoint
|
325 |
+
# only top-level checkpoints are considered and not the other ones, hence "checkpoint".
|
326 |
+
unallowed_substrings = {"scheduler", "optimizer", "checkpoint"}
|
327 |
+
targeted_files = list(
|
328 |
+
filter(lambda x: all(substring not in x for substring in unallowed_substrings), targeted_files)
|
329 |
+
)
|
330 |
+
|
331 |
+
if any(f.endswith(LORA_WEIGHT_NAME) for f in targeted_files):
|
332 |
+
targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME), targeted_files))
|
333 |
+
elif any(f.endswith(LORA_WEIGHT_NAME_SAFE) for f in targeted_files):
|
334 |
+
targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME_SAFE), targeted_files))
|
335 |
+
|
336 |
+
if len(targeted_files) > 1:
|
337 |
+
raise ValueError(
|
338 |
+
f"Provided path contains more than one weights file in the {file_extension} format. Either specify `weight_name` in `load_lora_weights` or make sure there's only one `.safetensors` or `.bin` file in {pretrained_model_name_or_path_or_dict}."
|
339 |
+
)
|
340 |
+
weight_name = targeted_files[0]
|
341 |
+
return weight_name
|
342 |
+
|
343 |
+
@classmethod
|
344 |
+
def _optionally_disable_offloading(cls, _pipeline):
|
345 |
+
"""
|
346 |
+
Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU.
|
347 |
+
|
348 |
+
Args:
|
349 |
+
_pipeline (`DiffusionPipeline`):
|
350 |
+
The pipeline to disable offloading for.
|
351 |
+
|
352 |
+
Returns:
|
353 |
+
tuple:
|
354 |
+
A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True.
|
355 |
+
"""
|
356 |
+
is_model_cpu_offload = False
|
357 |
+
is_sequential_cpu_offload = False
|
358 |
+
|
359 |
+
if _pipeline is not None:
|
360 |
+
for _, component in _pipeline.components.items():
|
361 |
+
if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"):
|
362 |
+
if not is_model_cpu_offload:
|
363 |
+
is_model_cpu_offload = isinstance(component._hf_hook, CpuOffload)
|
364 |
+
if not is_sequential_cpu_offload:
|
365 |
+
is_sequential_cpu_offload = isinstance(component._hf_hook, AlignDevicesHook)
|
366 |
+
|
367 |
+
logger.info(
|
368 |
+
"Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
|
369 |
+
)
|
370 |
+
remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
|
371 |
+
|
372 |
+
return (is_model_cpu_offload, is_sequential_cpu_offload)
|
373 |
+
|
374 |
+
@classmethod
|
375 |
+
def load_lora_into_unet(
|
376 |
+
cls, state_dict, network_alphas, unet, low_cpu_mem_usage=None, adapter_name=None, _pipeline=None
|
377 |
+
):
|
378 |
+
"""
|
379 |
+
This will load the LoRA layers specified in `state_dict` into `unet`.
|
380 |
+
|
381 |
+
Parameters:
|
382 |
+
state_dict (`dict`):
|
383 |
+
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
|
384 |
+
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
|
385 |
+
encoder lora layers.
|
386 |
+
network_alphas (`Dict[str, float]`):
|
387 |
+
See `LoRALinearLayer` for more details.
|
388 |
+
unet (`UNet2DConditionModel`):
|
389 |
+
The UNet model to load the LoRA layers into.
|
390 |
+
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
|
391 |
+
Speed up model loading only loading the pretrained weights and not initializing the weights. This also
|
392 |
+
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
|
393 |
+
Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
|
394 |
+
argument to `True` will raise an error.
|
395 |
+
adapter_name (`str`, *optional*):
|
396 |
+
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
|
397 |
+
`default_{i}` where i is the total number of adapters being loaded.
|
398 |
+
"""
|
399 |
+
low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT
|
400 |
+
# If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
|
401 |
+
# then the `state_dict` keys should have `cls.unet_name` and/or `cls.text_encoder_name` as
|
402 |
+
# their prefixes.
|
403 |
+
keys = list(state_dict.keys())
|
404 |
+
|
405 |
+
if all(key.startswith("unet.unet") for key in keys):
|
406 |
+
deprecation_message = "Keys starting with 'unet.unet' are deprecated."
|
407 |
+
deprecate("unet.unet keys", "0.27", deprecation_message)
|
408 |
+
|
409 |
+
if all(key.startswith(cls.unet_name) or key.startswith(cls.text_encoder_name) for key in keys):
|
410 |
+
# Load the layers corresponding to UNet.
|
411 |
+
logger.info(f"Loading {cls.unet_name}.")
|
412 |
+
|
413 |
+
unet_keys = [k for k in keys if k.startswith(cls.unet_name)]
|
414 |
+
state_dict = {k.replace(f"{cls.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}
|
415 |
+
|
416 |
+
if network_alphas is not None:
|
417 |
+
alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.unet_name)]
|
418 |
+
network_alphas = {
|
419 |
+
k.replace(f"{cls.unet_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
|
420 |
+
}
|
421 |
+
|
422 |
+
else:
|
423 |
+
# Otherwise, we're dealing with the old format. This means the `state_dict` should only
|
424 |
+
# contain the module names of the `unet` as its keys WITHOUT any prefix.
|
425 |
+
if not USE_PEFT_BACKEND:
|
426 |
+
warn_message = "You have saved the LoRA weights using the old format. To convert the old LoRA weights to the new format, you can first load them in a dictionary and then create a new dictionary like the following: `new_state_dict = {f'unet.{module_name}': params for module_name, params in old_state_dict.items()}`."
|
427 |
+
logger.warn(warn_message)
|
428 |
+
|
429 |
+
if USE_PEFT_BACKEND and len(state_dict.keys()) > 0:
|
430 |
+
from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
|
431 |
+
|
432 |
+
if adapter_name in getattr(unet, "peft_config", {}):
|
433 |
+
raise ValueError(
|
434 |
+
f"Adapter name {adapter_name} already in use in the Unet - please select a new adapter name."
|
435 |
+
)
|
436 |
+
|
437 |
+
state_dict = convert_unet_state_dict_to_peft(state_dict)
|
438 |
+
|
439 |
+
if network_alphas is not None:
|
440 |
+
# The alphas state dict have the same structure as Unet, thus we convert it to peft format using
|
441 |
+
# `convert_unet_state_dict_to_peft` method.
|
442 |
+
network_alphas = convert_unet_state_dict_to_peft(network_alphas)
|
443 |
+
|
444 |
+
rank = {}
|
445 |
+
for key, val in state_dict.items():
|
446 |
+
if "lora_B" in key:
|
447 |
+
rank[key] = val.shape[1]
|
448 |
+
|
449 |
+
lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict, is_unet=True)
|
450 |
+
lora_config = LoraConfig(**lora_config_kwargs)
|
451 |
+
|
452 |
+
# adapter_name
|
453 |
+
if adapter_name is None:
|
454 |
+
adapter_name = get_adapter_name(unet)
|
455 |
+
|
456 |
+
# In case the pipeline has been already offloaded to CPU - temporarily remove the hooks
|
457 |
+
# otherwise loading LoRA weights will lead to an error
|
458 |
+
is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline)
|
459 |
+
|
460 |
+
inject_adapter_in_model(lora_config, unet, adapter_name=adapter_name)
|
461 |
+
incompatible_keys = set_peft_model_state_dict(unet, state_dict, adapter_name)
|
462 |
+
|
463 |
+
if incompatible_keys is not None:
|
464 |
+
# check only for unexpected keys
|
465 |
+
unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
|
466 |
+
if unexpected_keys:
|
467 |
+
logger.warning(
|
468 |
+
f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
|
469 |
+
f" {unexpected_keys}. "
|
470 |
+
)
|
471 |
+
|
472 |
+
# Offload back.
|
473 |
+
if is_model_cpu_offload:
|
474 |
+
_pipeline.enable_model_cpu_offload()
|
475 |
+
elif is_sequential_cpu_offload:
|
476 |
+
_pipeline.enable_sequential_cpu_offload()
|
477 |
+
# Unsafe code />
|
478 |
+
|
479 |
+
unet.load_attn_procs(
|
480 |
+
state_dict, network_alphas=network_alphas, low_cpu_mem_usage=low_cpu_mem_usage, _pipeline=_pipeline
|
481 |
+
)
|
482 |
+
|
483 |
+
@classmethod
|
484 |
+
def load_lora_into_text_encoder(
|
485 |
+
cls,
|
486 |
+
state_dict,
|
487 |
+
network_alphas,
|
488 |
+
text_encoder,
|
489 |
+
prefix=None,
|
490 |
+
lora_scale=1.0,
|
491 |
+
low_cpu_mem_usage=None,
|
492 |
+
adapter_name=None,
|
493 |
+
_pipeline=None,
|
494 |
+
):
|
495 |
+
"""
|
496 |
+
This will load the LoRA layers specified in `state_dict` into `text_encoder`
|
497 |
+
|
498 |
+
Parameters:
|
499 |
+
state_dict (`dict`):
|
500 |
+
A standard state dict containing the lora layer parameters. The key should be prefixed with an
|
501 |
+
additional `text_encoder` to distinguish between unet lora layers.
|
502 |
+
network_alphas (`Dict[str, float]`):
|
503 |
+
See `LoRALinearLayer` for more details.
|
504 |
+
text_encoder (`CLIPTextModel`):
|
505 |
+
The text encoder model to load the LoRA layers into.
|
506 |
+
prefix (`str`):
|
507 |
+
Expected prefix of the `text_encoder` in the `state_dict`.
|
508 |
+
lora_scale (`float`):
|
509 |
+
How much to scale the output of the lora linear layer before it is added with the output of the regular
|
510 |
+
lora layer.
|
511 |
+
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
|
512 |
+
Speed up model loading only loading the pretrained weights and not initializing the weights. This also
|
513 |
+
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
|
514 |
+
Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
|
515 |
+
argument to `True` will raise an error.
|
516 |
+
adapter_name (`str`, *optional*):
|
517 |
+
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
|
518 |
+
`default_{i}` where i is the total number of adapters being loaded.
|
519 |
+
"""
|
520 |
+
low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT
|
521 |
+
|
522 |
+
# If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
|
523 |
+
# then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as
|
524 |
+
# their prefixes.
|
525 |
+
keys = list(state_dict.keys())
|
526 |
+
prefix = cls.text_encoder_name if prefix is None else prefix
|
527 |
+
|
528 |
+
# Safe prefix to check with.
|
529 |
+
if any(cls.text_encoder_name in key for key in keys):
|
530 |
+
# Load the layers corresponding to text encoder and make necessary adjustments.
|
531 |
+
text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix]
|
532 |
+
text_encoder_lora_state_dict = {
|
533 |
+
k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys
|
534 |
+
}
|
535 |
+
|
536 |
+
if len(text_encoder_lora_state_dict) > 0:
|
537 |
+
logger.info(f"Loading {prefix}.")
|
538 |
+
rank = {}
|
539 |
+
text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict)
|
540 |
+
|
541 |
+
if USE_PEFT_BACKEND:
|
542 |
+
# convert state dict
|
543 |
+
text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict)
|
544 |
+
|
545 |
+
for name, _ in text_encoder_attn_modules(text_encoder):
|
546 |
+
rank_key = f"{name}.out_proj.lora_B.weight"
|
547 |
+
rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1]
|
548 |
+
|
549 |
+
patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
|
550 |
+
if patch_mlp:
|
551 |
+
for name, _ in text_encoder_mlp_modules(text_encoder):
|
552 |
+
rank_key_fc1 = f"{name}.fc1.lora_B.weight"
|
553 |
+
rank_key_fc2 = f"{name}.fc2.lora_B.weight"
|
554 |
+
|
555 |
+
rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[1]
|
556 |
+
rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[1]
|
557 |
+
else:
|
558 |
+
for name, _ in text_encoder_attn_modules(text_encoder):
|
559 |
+
rank_key = f"{name}.out_proj.lora_linear_layer.up.weight"
|
560 |
+
rank.update({rank_key: text_encoder_lora_state_dict[rank_key].shape[1]})
|
561 |
+
|
562 |
+
patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
|
563 |
+
if patch_mlp:
|
564 |
+
for name, _ in text_encoder_mlp_modules(text_encoder):
|
565 |
+
rank_key_fc1 = f"{name}.fc1.lora_linear_layer.up.weight"
|
566 |
+
rank_key_fc2 = f"{name}.fc2.lora_linear_layer.up.weight"
|
567 |
+
rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[1]
|
568 |
+
rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[1]
|
569 |
+
|
570 |
+
if network_alphas is not None:
|
571 |
+
alpha_keys = [
|
572 |
+
k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix
|
573 |
+
]
|
574 |
+
network_alphas = {
|
575 |
+
k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
|
576 |
+
}
|
577 |
+
|
578 |
+
if USE_PEFT_BACKEND:
|
579 |
+
from peft import LoraConfig
|
580 |
+
|
581 |
+
lora_config_kwargs = get_peft_kwargs(
|
582 |
+
rank, network_alphas, text_encoder_lora_state_dict, is_unet=False
|
583 |
+
)
|
584 |
+
|
585 |
+
lora_config = LoraConfig(**lora_config_kwargs)
|
586 |
+
|
587 |
+
# adapter_name
|
588 |
+
if adapter_name is None:
|
589 |
+
adapter_name = get_adapter_name(text_encoder)
|
590 |
+
|
591 |
+
is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline)
|
592 |
+
|
593 |
+
# inject LoRA layers and load the state dict
|
594 |
+
# in transformers we automatically check whether the adapter name is already in use or not
|
595 |
+
text_encoder.load_adapter(
|
596 |
+
adapter_name=adapter_name,
|
597 |
+
adapter_state_dict=text_encoder_lora_state_dict,
|
598 |
+
peft_config=lora_config,
|
599 |
+
)
|
600 |
+
|
601 |
+
# scale LoRA layers with `lora_scale`
|
602 |
+
scale_lora_layers(text_encoder, weight=lora_scale)
|
603 |
+
else:
|
604 |
+
cls._modify_text_encoder(
|
605 |
+
text_encoder,
|
606 |
+
lora_scale,
|
607 |
+
network_alphas,
|
608 |
+
rank=rank,
|
609 |
+
patch_mlp=patch_mlp,
|
610 |
+
low_cpu_mem_usage=low_cpu_mem_usage,
|
611 |
+
)
|
612 |
+
|
613 |
+
is_pipeline_offloaded = _pipeline is not None and any(
|
614 |
+
isinstance(c, torch.nn.Module) and hasattr(c, "_hf_hook")
|
615 |
+
for c in _pipeline.components.values()
|
616 |
+
)
|
617 |
+
if is_pipeline_offloaded and low_cpu_mem_usage:
|
618 |
+
low_cpu_mem_usage = True
|
619 |
+
logger.info(
|
620 |
+
f"Pipeline {_pipeline.__class__} is offloaded. Therefore low cpu mem usage loading is forced."
|
621 |
+
)
|
622 |
+
|
623 |
+
if low_cpu_mem_usage:
|
624 |
+
device = next(iter(text_encoder_lora_state_dict.values())).device
|
625 |
+
dtype = next(iter(text_encoder_lora_state_dict.values())).dtype
|
626 |
+
unexpected_keys = load_model_dict_into_meta(
|
627 |
+
text_encoder, text_encoder_lora_state_dict, device=device, dtype=dtype
|
628 |
+
)
|
629 |
+
else:
|
630 |
+
load_state_dict_results = text_encoder.load_state_dict(
|
631 |
+
text_encoder_lora_state_dict, strict=False
|
632 |
+
)
|
633 |
+
unexpected_keys = load_state_dict_results.unexpected_keys
|
634 |
+
|
635 |
+
if len(unexpected_keys) != 0:
|
636 |
+
raise ValueError(
|
637 |
+
f"failed to load text encoder state dict, unexpected keys: {load_state_dict_results.unexpected_keys}"
|
638 |
+
)
|
639 |
+
|
640 |
+
# <Unsafe code
|
641 |
+
# We can be sure that the following works as all we do is change the dtype and device of the text encoder
|
642 |
+
# Now we remove any existing hooks to
|
643 |
+
is_model_cpu_offload = False
|
644 |
+
is_sequential_cpu_offload = False
|
645 |
+
if _pipeline is not None:
|
646 |
+
for _, component in _pipeline.components.items():
|
647 |
+
if isinstance(component, torch.nn.Module):
|
648 |
+
if hasattr(component, "_hf_hook"):
|
649 |
+
is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
|
650 |
+
is_sequential_cpu_offload = isinstance(
|
651 |
+
getattr(component, "_hf_hook"), AlignDevicesHook
|
652 |
+
)
|
653 |
+
logger.info(
|
654 |
+
"Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
|
655 |
+
)
|
656 |
+
remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
|
657 |
+
|
658 |
+
text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype)
|
659 |
+
|
660 |
+
# Offload back.
|
661 |
+
if is_model_cpu_offload:
|
662 |
+
_pipeline.enable_model_cpu_offload()
|
663 |
+
elif is_sequential_cpu_offload:
|
664 |
+
_pipeline.enable_sequential_cpu_offload()
|
665 |
+
# Unsafe code />
|
666 |
+
|
667 |
+
@classmethod
|
668 |
+
def load_lora_into_transformer(
|
669 |
+
cls, state_dict, network_alphas, transformer, low_cpu_mem_usage=None, adapter_name=None, _pipeline=None
|
670 |
+
):
|
671 |
+
"""
|
672 |
+
This will load the LoRA layers specified in `state_dict` into `transformer`.
|
673 |
+
|
674 |
+
Parameters:
|
675 |
+
state_dict (`dict`):
|
676 |
+
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
|
677 |
+
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
|
678 |
+
encoder lora layers.
|
679 |
+
network_alphas (`Dict[str, float]`):
|
680 |
+
See `LoRALinearLayer` for more details.
|
681 |
+
unet (`UNet2DConditionModel`):
|
682 |
+
The UNet model to load the LoRA layers into.
|
683 |
+
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
|
684 |
+
Speed up model loading only loading the pretrained weights and not initializing the weights. This also
|
685 |
+
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
|
686 |
+
Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
|
687 |
+
argument to `True` will raise an error.
|
688 |
+
adapter_name (`str`, *optional*):
|
689 |
+
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
|
690 |
+
`default_{i}` where i is the total number of adapters being loaded.
|
691 |
+
"""
|
692 |
+
low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT
|
693 |
+
|
694 |
+
keys = list(state_dict.keys())
|
695 |
+
|
696 |
+
transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)]
|
697 |
+
state_dict = {
|
698 |
+
k.replace(f"{cls.transformer_name}.", ""): v for k, v in state_dict.items() if k in transformer_keys
|
699 |
+
}
|
700 |
+
|
701 |
+
if network_alphas is not None:
|
702 |
+
alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.transformer_name)]
|
703 |
+
network_alphas = {
|
704 |
+
k.replace(f"{cls.transformer_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
|
705 |
+
}
|
706 |
+
|
707 |
+
if len(state_dict.keys()) > 0:
|
708 |
+
from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
|
709 |
+
|
710 |
+
if adapter_name in getattr(transformer, "peft_config", {}):
|
711 |
+
raise ValueError(
|
712 |
+
f"Adapter name {adapter_name} already in use in the transformer - please select a new adapter name."
|
713 |
+
)
|
714 |
+
|
715 |
+
rank = {}
|
716 |
+
for key, val in state_dict.items():
|
717 |
+
if "lora_B" in key:
|
718 |
+
rank[key] = val.shape[1]
|
719 |
+
|
720 |
+
lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict)
|
721 |
+
lora_config = LoraConfig(**lora_config_kwargs)
|
722 |
+
|
723 |
+
# adapter_name
|
724 |
+
if adapter_name is None:
|
725 |
+
adapter_name = get_adapter_name(transformer)
|
726 |
+
|
727 |
+
# In case the pipeline has been already offloaded to CPU - temporarily remove the hooks
|
728 |
+
# otherwise loading LoRA weights will lead to an error
|
729 |
+
is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline)
|
730 |
+
|
731 |
+
inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name)
|
732 |
+
incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name)
|
733 |
+
|
734 |
+
if incompatible_keys is not None:
|
735 |
+
# check only for unexpected keys
|
736 |
+
unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
|
737 |
+
if unexpected_keys:
|
738 |
+
logger.warning(
|
739 |
+
f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
|
740 |
+
f" {unexpected_keys}. "
|
741 |
+
)
|
742 |
+
|
743 |
+
# Offload back.
|
744 |
+
if is_model_cpu_offload:
|
745 |
+
_pipeline.enable_model_cpu_offload()
|
746 |
+
elif is_sequential_cpu_offload:
|
747 |
+
_pipeline.enable_sequential_cpu_offload()
|
748 |
+
# Unsafe code />
|
749 |
+
|
750 |
+
@property
|
751 |
+
def lora_scale(self) -> float:
|
752 |
+
# property function that returns the lora scale which can be set at run time by the pipeline.
|
753 |
+
# if _lora_scale has not been set, return 1
|
754 |
+
return self._lora_scale if hasattr(self, "_lora_scale") else 1.0
|
755 |
+
|
756 |
+
def _remove_text_encoder_monkey_patch(self):
|
757 |
+
if USE_PEFT_BACKEND:
|
758 |
+
remove_method = recurse_remove_peft_layers
|
759 |
+
else:
|
760 |
+
remove_method = self._remove_text_encoder_monkey_patch_classmethod
|
761 |
+
|
762 |
+
if hasattr(self, "text_encoder"):
|
763 |
+
remove_method(self.text_encoder)
|
764 |
+
|
765 |
+
# In case text encoder have no Lora attached
|
766 |
+
if USE_PEFT_BACKEND and getattr(self.text_encoder, "peft_config", None) is not None:
|
767 |
+
del self.text_encoder.peft_config
|
768 |
+
self.text_encoder._hf_peft_config_loaded = None
|
769 |
+
if hasattr(self, "text_encoder_2"):
|
770 |
+
remove_method(self.text_encoder_2)
|
771 |
+
if USE_PEFT_BACKEND:
|
772 |
+
del self.text_encoder_2.peft_config
|
773 |
+
self.text_encoder_2._hf_peft_config_loaded = None
|
774 |
+
|
775 |
+
@classmethod
|
776 |
+
def _remove_text_encoder_monkey_patch_classmethod(cls, text_encoder):
|
777 |
+
deprecate("_remove_text_encoder_monkey_patch_classmethod", "0.27", LORA_DEPRECATION_MESSAGE)
|
778 |
+
|
779 |
+
for _, attn_module in text_encoder_attn_modules(text_encoder):
|
780 |
+
if isinstance(attn_module.q_proj, PatchedLoraProjection):
|
781 |
+
attn_module.q_proj.lora_linear_layer = None
|
782 |
+
attn_module.k_proj.lora_linear_layer = None
|
783 |
+
attn_module.v_proj.lora_linear_layer = None
|
784 |
+
attn_module.out_proj.lora_linear_layer = None
|
785 |
+
|
786 |
+
for _, mlp_module in text_encoder_mlp_modules(text_encoder):
|
787 |
+
if isinstance(mlp_module.fc1, PatchedLoraProjection):
|
788 |
+
mlp_module.fc1.lora_linear_layer = None
|
789 |
+
mlp_module.fc2.lora_linear_layer = None
|
790 |
+
|
791 |
+
@classmethod
|
792 |
+
def _modify_text_encoder(
|
793 |
+
cls,
|
794 |
+
text_encoder,
|
795 |
+
lora_scale=1,
|
796 |
+
network_alphas=None,
|
797 |
+
rank: Union[Dict[str, int], int] = 4,
|
798 |
+
dtype=None,
|
799 |
+
patch_mlp=False,
|
800 |
+
low_cpu_mem_usage=False,
|
801 |
+
):
|
802 |
+
r"""
|
803 |
+
Monkey-patches the forward passes of attention modules of the text encoder.
|
804 |
+
"""
|
805 |
+
deprecate("_modify_text_encoder", "0.27", LORA_DEPRECATION_MESSAGE)
|
806 |
+
|
807 |
+
def create_patched_linear_lora(model, network_alpha, rank, dtype, lora_parameters):
|
808 |
+
linear_layer = model.regular_linear_layer if isinstance(model, PatchedLoraProjection) else model
|
809 |
+
ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
|
810 |
+
with ctx():
|
811 |
+
model = PatchedLoraProjection(linear_layer, lora_scale, network_alpha, rank, dtype=dtype)
|
812 |
+
|
813 |
+
lora_parameters.extend(model.lora_linear_layer.parameters())
|
814 |
+
return model
|
815 |
+
|
816 |
+
# First, remove any monkey-patch that might have been applied before
|
817 |
+
cls._remove_text_encoder_monkey_patch_classmethod(text_encoder)
|
818 |
+
|
819 |
+
lora_parameters = []
|
820 |
+
network_alphas = {} if network_alphas is None else network_alphas
|
821 |
+
is_network_alphas_populated = len(network_alphas) > 0
|
822 |
+
|
823 |
+
for name, attn_module in text_encoder_attn_modules(text_encoder):
|
824 |
+
query_alpha = network_alphas.pop(name + ".to_q_lora.down.weight.alpha", None)
|
825 |
+
key_alpha = network_alphas.pop(name + ".to_k_lora.down.weight.alpha", None)
|
826 |
+
value_alpha = network_alphas.pop(name + ".to_v_lora.down.weight.alpha", None)
|
827 |
+
out_alpha = network_alphas.pop(name + ".to_out_lora.down.weight.alpha", None)
|
828 |
+
|
829 |
+
if isinstance(rank, dict):
|
830 |
+
current_rank = rank.pop(f"{name}.out_proj.lora_linear_layer.up.weight")
|
831 |
+
else:
|
832 |
+
current_rank = rank
|
833 |
+
|
834 |
+
attn_module.q_proj = create_patched_linear_lora(
|
835 |
+
attn_module.q_proj, query_alpha, current_rank, dtype, lora_parameters
|
836 |
+
)
|
837 |
+
attn_module.k_proj = create_patched_linear_lora(
|
838 |
+
attn_module.k_proj, key_alpha, current_rank, dtype, lora_parameters
|
839 |
+
)
|
840 |
+
attn_module.v_proj = create_patched_linear_lora(
|
841 |
+
attn_module.v_proj, value_alpha, current_rank, dtype, lora_parameters
|
842 |
+
)
|
843 |
+
attn_module.out_proj = create_patched_linear_lora(
|
844 |
+
attn_module.out_proj, out_alpha, current_rank, dtype, lora_parameters
|
845 |
+
)
|
846 |
+
|
847 |
+
if patch_mlp:
|
848 |
+
for name, mlp_module in text_encoder_mlp_modules(text_encoder):
|
849 |
+
fc1_alpha = network_alphas.pop(name + ".fc1.lora_linear_layer.down.weight.alpha", None)
|
850 |
+
fc2_alpha = network_alphas.pop(name + ".fc2.lora_linear_layer.down.weight.alpha", None)
|
851 |
+
|
852 |
+
current_rank_fc1 = rank.pop(f"{name}.fc1.lora_linear_layer.up.weight")
|
853 |
+
current_rank_fc2 = rank.pop(f"{name}.fc2.lora_linear_layer.up.weight")
|
854 |
+
|
855 |
+
mlp_module.fc1 = create_patched_linear_lora(
|
856 |
+
mlp_module.fc1, fc1_alpha, current_rank_fc1, dtype, lora_parameters
|
857 |
+
)
|
858 |
+
mlp_module.fc2 = create_patched_linear_lora(
|
859 |
+
mlp_module.fc2, fc2_alpha, current_rank_fc2, dtype, lora_parameters
|
860 |
+
)
|
861 |
+
|
862 |
+
if is_network_alphas_populated and len(network_alphas) > 0:
|
863 |
+
raise ValueError(
|
864 |
+
f"The `network_alphas` has to be empty at this point but has the following keys \n\n {', '.join(network_alphas.keys())}"
|
865 |
+
)
|
866 |
+
|
867 |
+
return lora_parameters
|
868 |
+
|
869 |
+
@classmethod
|
870 |
+
def save_lora_weights(
|
871 |
+
cls,
|
872 |
+
save_directory: Union[str, os.PathLike],
|
873 |
+
unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
874 |
+
text_encoder_lora_layers: Dict[str, torch.nn.Module] = None,
|
875 |
+
transformer_lora_layers: Dict[str, torch.nn.Module] = None,
|
876 |
+
is_main_process: bool = True,
|
877 |
+
weight_name: str = None,
|
878 |
+
save_function: Callable = None,
|
879 |
+
safe_serialization: bool = True,
|
880 |
+
):
|
881 |
+
r"""
|
882 |
+
Save the LoRA parameters corresponding to the UNet and text encoder.
|
883 |
+
|
884 |
+
Arguments:
|
885 |
+
save_directory (`str` or `os.PathLike`):
|
886 |
+
Directory to save LoRA parameters to. Will be created if it doesn't exist.
|
887 |
+
unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
|
888 |
+
State dict of the LoRA layers corresponding to the `unet`.
|
889 |
+
text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
|
890 |
+
State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
|
891 |
+
encoder LoRA state dict because it comes from 🤗 Transformers.
|
892 |
+
is_main_process (`bool`, *optional*, defaults to `True`):
|
893 |
+
Whether the process calling this is the main process or not. Useful during distributed training and you
|
894 |
+
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
|
895 |
+
process to avoid race conditions.
|
896 |
+
save_function (`Callable`):
|
897 |
+
The function to use to save the state dictionary. Useful during distributed training when you need to
|
898 |
+
replace `torch.save` with another method. Can be configured with the environment variable
|
899 |
+
`DIFFUSERS_SAVE_MODE`.
|
900 |
+
safe_serialization (`bool`, *optional*, defaults to `True`):
|
901 |
+
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
|
902 |
+
"""
|
903 |
+
state_dict = {}
|
904 |
+
|
905 |
+
def pack_weights(layers, prefix):
|
906 |
+
layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
|
907 |
+
layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
|
908 |
+
return layers_state_dict
|
909 |
+
|
910 |
+
if not (unet_lora_layers or text_encoder_lora_layers or transformer_lora_layers):
|
911 |
+
raise ValueError(
|
912 |
+
"You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers`, or `transformer_lora_layers`."
|
913 |
+
)
|
914 |
+
|
915 |
+
if unet_lora_layers:
|
916 |
+
state_dict.update(pack_weights(unet_lora_layers, cls.unet_name))
|
917 |
+
|
918 |
+
if text_encoder_lora_layers:
|
919 |
+
state_dict.update(pack_weights(text_encoder_lora_layers, cls.text_encoder_name))
|
920 |
+
|
921 |
+
if transformer_lora_layers:
|
922 |
+
state_dict.update(pack_weights(transformer_lora_layers, "transformer"))
|
923 |
+
|
924 |
+
# Save the model
|
925 |
+
cls.write_lora_layers(
|
926 |
+
state_dict=state_dict,
|
927 |
+
save_directory=save_directory,
|
928 |
+
is_main_process=is_main_process,
|
929 |
+
weight_name=weight_name,
|
930 |
+
save_function=save_function,
|
931 |
+
safe_serialization=safe_serialization,
|
932 |
+
)
|
933 |
+
|
934 |
+
@staticmethod
|
935 |
+
def write_lora_layers(
|
936 |
+
state_dict: Dict[str, torch.Tensor],
|
937 |
+
save_directory: str,
|
938 |
+
is_main_process: bool,
|
939 |
+
weight_name: str,
|
940 |
+
save_function: Callable,
|
941 |
+
safe_serialization: bool,
|
942 |
+
):
|
943 |
+
if os.path.isfile(save_directory):
|
944 |
+
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
|
945 |
+
return
|
946 |
+
|
947 |
+
if save_function is None:
|
948 |
+
if safe_serialization:
|
949 |
+
|
950 |
+
def save_function(weights, filename):
|
951 |
+
return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})
|
952 |
+
|
953 |
+
else:
|
954 |
+
save_function = torch.save
|
955 |
+
|
956 |
+
os.makedirs(save_directory, exist_ok=True)
|
957 |
+
|
958 |
+
if weight_name is None:
|
959 |
+
if safe_serialization:
|
960 |
+
weight_name = LORA_WEIGHT_NAME_SAFE
|
961 |
+
else:
|
962 |
+
weight_name = LORA_WEIGHT_NAME
|
963 |
+
|
964 |
+
save_function(state_dict, os.path.join(save_directory, weight_name))
|
965 |
+
logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")
|
966 |
+
|
967 |
+
def unload_lora_weights(self):
|
968 |
+
"""
|
969 |
+
Unloads the LoRA parameters.
|
970 |
+
|
971 |
+
Examples:
|
972 |
+
|
973 |
+
```python
|
974 |
+
>>> # Assuming `pipeline` is already loaded with the LoRA parameters.
|
975 |
+
>>> pipeline.unload_lora_weights()
|
976 |
+
>>> ...
|
977 |
+
```
|
978 |
+
"""
|
979 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
980 |
+
|
981 |
+
if not USE_PEFT_BACKEND:
|
982 |
+
if version.parse(__version__) > version.parse("0.23"):
|
983 |
+
logger.warning(
|
984 |
+
"You are using `unload_lora_weights` to disable and unload lora weights. If you want to iteratively enable and disable adapter weights,"
|
985 |
+
"you can use `pipe.enable_lora()` or `pipe.disable_lora()`. After installing the latest version of PEFT."
|
986 |
+
)
|
987 |
+
|
988 |
+
for _, module in unet.named_modules():
|
989 |
+
if hasattr(module, "set_lora_layer"):
|
990 |
+
module.set_lora_layer(None)
|
991 |
+
else:
|
992 |
+
recurse_remove_peft_layers(unet)
|
993 |
+
if hasattr(unet, "peft_config"):
|
994 |
+
del unet.peft_config
|
995 |
+
|
996 |
+
# Safe to call the following regardless of LoRA.
|
997 |
+
self._remove_text_encoder_monkey_patch()
|
998 |
+
|
999 |
+
def fuse_lora(
|
1000 |
+
self,
|
1001 |
+
fuse_unet: bool = True,
|
1002 |
+
fuse_text_encoder: bool = True,
|
1003 |
+
lora_scale: float = 1.0,
|
1004 |
+
safe_fusing: bool = False,
|
1005 |
+
adapter_names: Optional[List[str]] = None,
|
1006 |
+
):
|
1007 |
+
r"""
|
1008 |
+
Fuses the LoRA parameters into the original parameters of the corresponding blocks.
|
1009 |
+
|
1010 |
+
<Tip warning={true}>
|
1011 |
+
|
1012 |
+
This is an experimental API.
|
1013 |
+
|
1014 |
+
</Tip>
|
1015 |
+
|
1016 |
+
Args:
|
1017 |
+
fuse_unet (`bool`, defaults to `True`): Whether to fuse the UNet LoRA parameters.
|
1018 |
+
fuse_text_encoder (`bool`, defaults to `True`):
|
1019 |
+
Whether to fuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
|
1020 |
+
LoRA parameters then it won't have any effect.
|
1021 |
+
lora_scale (`float`, defaults to 1.0):
|
1022 |
+
Controls how much to influence the outputs with the LoRA parameters.
|
1023 |
+
safe_fusing (`bool`, defaults to `False`):
|
1024 |
+
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
|
1025 |
+
adapter_names (`List[str]`, *optional*):
|
1026 |
+
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
|
1027 |
+
|
1028 |
+
Example:
|
1029 |
+
|
1030 |
+
```py
|
1031 |
+
from diffusers import DiffusionPipeline
|
1032 |
+
import torch
|
1033 |
+
|
1034 |
+
pipeline = DiffusionPipeline.from_pretrained(
|
1035 |
+
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
|
1036 |
+
).to("cuda")
|
1037 |
+
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
|
1038 |
+
pipeline.fuse_lora(lora_scale=0.7)
|
1039 |
+
```
|
1040 |
+
"""
|
1041 |
+
if fuse_unet or fuse_text_encoder:
|
1042 |
+
self.num_fused_loras += 1
|
1043 |
+
if self.num_fused_loras > 1:
|
1044 |
+
logger.warning(
|
1045 |
+
"The current API is supported for operating with a single LoRA file. You are trying to load and fuse more than one LoRA which is not well-supported.",
|
1046 |
+
)
|
1047 |
+
|
1048 |
+
if fuse_unet:
|
1049 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
1050 |
+
unet.fuse_lora(lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names)
|
1051 |
+
|
1052 |
+
if USE_PEFT_BACKEND:
|
1053 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
1054 |
+
|
1055 |
+
def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, adapter_names=None):
|
1056 |
+
merge_kwargs = {"safe_merge": safe_fusing}
|
1057 |
+
|
1058 |
+
for module in text_encoder.modules():
|
1059 |
+
if isinstance(module, BaseTunerLayer):
|
1060 |
+
if lora_scale != 1.0:
|
1061 |
+
module.scale_layer(lora_scale)
|
1062 |
+
|
1063 |
+
# For BC with previous PEFT versions, we need to check the signature
|
1064 |
+
# of the `merge` method to see if it supports the `adapter_names` argument.
|
1065 |
+
supported_merge_kwargs = list(inspect.signature(module.merge).parameters)
|
1066 |
+
if "adapter_names" in supported_merge_kwargs:
|
1067 |
+
merge_kwargs["adapter_names"] = adapter_names
|
1068 |
+
elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None:
|
1069 |
+
raise ValueError(
|
1070 |
+
"The `adapter_names` argument is not supported with your PEFT version. "
|
1071 |
+
"Please upgrade to the latest version of PEFT. `pip install -U peft`"
|
1072 |
+
)
|
1073 |
+
|
1074 |
+
module.merge(**merge_kwargs)
|
1075 |
+
|
1076 |
+
else:
|
1077 |
+
deprecate("fuse_text_encoder_lora", "0.27", LORA_DEPRECATION_MESSAGE)
|
1078 |
+
|
1079 |
+
def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, **kwargs):
|
1080 |
+
if "adapter_names" in kwargs and kwargs["adapter_names"] is not None:
|
1081 |
+
raise ValueError(
|
1082 |
+
"The `adapter_names` argument is not supported in your environment. Please switch to PEFT "
|
1083 |
+
"backend to use this argument by installing latest PEFT and transformers."
|
1084 |
+
" `pip install -U peft transformers`"
|
1085 |
+
)
|
1086 |
+
|
1087 |
+
for _, attn_module in text_encoder_attn_modules(text_encoder):
|
1088 |
+
if isinstance(attn_module.q_proj, PatchedLoraProjection):
|
1089 |
+
attn_module.q_proj._fuse_lora(lora_scale, safe_fusing)
|
1090 |
+
attn_module.k_proj._fuse_lora(lora_scale, safe_fusing)
|
1091 |
+
attn_module.v_proj._fuse_lora(lora_scale, safe_fusing)
|
1092 |
+
attn_module.out_proj._fuse_lora(lora_scale, safe_fusing)
|
1093 |
+
|
1094 |
+
for _, mlp_module in text_encoder_mlp_modules(text_encoder):
|
1095 |
+
if isinstance(mlp_module.fc1, PatchedLoraProjection):
|
1096 |
+
mlp_module.fc1._fuse_lora(lora_scale, safe_fusing)
|
1097 |
+
mlp_module.fc2._fuse_lora(lora_scale, safe_fusing)
|
1098 |
+
|
1099 |
+
if fuse_text_encoder:
|
1100 |
+
if hasattr(self, "text_encoder"):
|
1101 |
+
fuse_text_encoder_lora(self.text_encoder, lora_scale, safe_fusing, adapter_names=adapter_names)
|
1102 |
+
if hasattr(self, "text_encoder_2"):
|
1103 |
+
fuse_text_encoder_lora(self.text_encoder_2, lora_scale, safe_fusing, adapter_names=adapter_names)
|
1104 |
+
|
1105 |
+
def unfuse_lora(self, unfuse_unet: bool = True, unfuse_text_encoder: bool = True):
|
1106 |
+
r"""
|
1107 |
+
Reverses the effect of
|
1108 |
+
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.fuse_lora).
|
1109 |
+
|
1110 |
+
<Tip warning={true}>
|
1111 |
+
|
1112 |
+
This is an experimental API.
|
1113 |
+
|
1114 |
+
</Tip>
|
1115 |
+
|
1116 |
+
Args:
|
1117 |
+
unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
|
1118 |
+
unfuse_text_encoder (`bool`, defaults to `True`):
|
1119 |
+
Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
|
1120 |
+
LoRA parameters then it won't have any effect.
|
1121 |
+
"""
|
1122 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
1123 |
+
if unfuse_unet:
|
1124 |
+
if not USE_PEFT_BACKEND:
|
1125 |
+
unet.unfuse_lora()
|
1126 |
+
else:
|
1127 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
1128 |
+
|
1129 |
+
for module in unet.modules():
|
1130 |
+
if isinstance(module, BaseTunerLayer):
|
1131 |
+
module.unmerge()
|
1132 |
+
|
1133 |
+
if USE_PEFT_BACKEND:
|
1134 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
1135 |
+
|
1136 |
+
def unfuse_text_encoder_lora(text_encoder):
|
1137 |
+
for module in text_encoder.modules():
|
1138 |
+
if isinstance(module, BaseTunerLayer):
|
1139 |
+
module.unmerge()
|
1140 |
+
|
1141 |
+
else:
|
1142 |
+
deprecate("unfuse_text_encoder_lora", "0.27", LORA_DEPRECATION_MESSAGE)
|
1143 |
+
|
1144 |
+
def unfuse_text_encoder_lora(text_encoder):
|
1145 |
+
for _, attn_module in text_encoder_attn_modules(text_encoder):
|
1146 |
+
if isinstance(attn_module.q_proj, PatchedLoraProjection):
|
1147 |
+
attn_module.q_proj._unfuse_lora()
|
1148 |
+
attn_module.k_proj._unfuse_lora()
|
1149 |
+
attn_module.v_proj._unfuse_lora()
|
1150 |
+
attn_module.out_proj._unfuse_lora()
|
1151 |
+
|
1152 |
+
for _, mlp_module in text_encoder_mlp_modules(text_encoder):
|
1153 |
+
if isinstance(mlp_module.fc1, PatchedLoraProjection):
|
1154 |
+
mlp_module.fc1._unfuse_lora()
|
1155 |
+
mlp_module.fc2._unfuse_lora()
|
1156 |
+
|
1157 |
+
if unfuse_text_encoder:
|
1158 |
+
if hasattr(self, "text_encoder"):
|
1159 |
+
unfuse_text_encoder_lora(self.text_encoder)
|
1160 |
+
if hasattr(self, "text_encoder_2"):
|
1161 |
+
unfuse_text_encoder_lora(self.text_encoder_2)
|
1162 |
+
|
1163 |
+
self.num_fused_loras -= 1
|
1164 |
+
|
1165 |
+
def set_adapters_for_text_encoder(
|
1166 |
+
self,
|
1167 |
+
adapter_names: Union[List[str], str],
|
1168 |
+
text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821
|
1169 |
+
text_encoder_weights: List[float] = None,
|
1170 |
+
):
|
1171 |
+
"""
|
1172 |
+
Sets the adapter layers for the text encoder.
|
1173 |
+
|
1174 |
+
Args:
|
1175 |
+
adapter_names (`List[str]` or `str`):
|
1176 |
+
The names of the adapters to use.
|
1177 |
+
text_encoder (`torch.nn.Module`, *optional*):
|
1178 |
+
The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
|
1179 |
+
attribute.
|
1180 |
+
text_encoder_weights (`List[float]`, *optional*):
|
1181 |
+
The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters.
|
1182 |
+
"""
|
1183 |
+
if not USE_PEFT_BACKEND:
|
1184 |
+
raise ValueError("PEFT backend is required for this method.")
|
1185 |
+
|
1186 |
+
def process_weights(adapter_names, weights):
|
1187 |
+
if weights is None:
|
1188 |
+
weights = [1.0] * len(adapter_names)
|
1189 |
+
elif isinstance(weights, float):
|
1190 |
+
weights = [weights]
|
1191 |
+
|
1192 |
+
if len(adapter_names) != len(weights):
|
1193 |
+
raise ValueError(
|
1194 |
+
f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}"
|
1195 |
+
)
|
1196 |
+
return weights
|
1197 |
+
|
1198 |
+
adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
|
1199 |
+
text_encoder_weights = process_weights(adapter_names, text_encoder_weights)
|
1200 |
+
text_encoder = text_encoder or getattr(self, "text_encoder", None)
|
1201 |
+
if text_encoder is None:
|
1202 |
+
raise ValueError(
|
1203 |
+
"The pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead."
|
1204 |
+
)
|
1205 |
+
set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights)
|
1206 |
+
|
1207 |
+
def disable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None):
|
1208 |
+
"""
|
1209 |
+
Disables the LoRA layers for the text encoder.
|
1210 |
+
|
1211 |
+
Args:
|
1212 |
+
text_encoder (`torch.nn.Module`, *optional*):
|
1213 |
+
The text encoder module to disable the LoRA layers for. If `None`, it will try to get the
|
1214 |
+
`text_encoder` attribute.
|
1215 |
+
"""
|
1216 |
+
if not USE_PEFT_BACKEND:
|
1217 |
+
raise ValueError("PEFT backend is required for this method.")
|
1218 |
+
|
1219 |
+
text_encoder = text_encoder or getattr(self, "text_encoder", None)
|
1220 |
+
if text_encoder is None:
|
1221 |
+
raise ValueError("Text Encoder not found.")
|
1222 |
+
set_adapter_layers(text_encoder, enabled=False)
|
1223 |
+
|
1224 |
+
def enable_lora_for_text_encoder(self, text_encoder: Optional["PreTrainedModel"] = None):
|
1225 |
+
"""
|
1226 |
+
Enables the LoRA layers for the text encoder.
|
1227 |
+
|
1228 |
+
Args:
|
1229 |
+
text_encoder (`torch.nn.Module`, *optional*):
|
1230 |
+
The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder`
|
1231 |
+
attribute.
|
1232 |
+
"""
|
1233 |
+
if not USE_PEFT_BACKEND:
|
1234 |
+
raise ValueError("PEFT backend is required for this method.")
|
1235 |
+
text_encoder = text_encoder or getattr(self, "text_encoder", None)
|
1236 |
+
if text_encoder is None:
|
1237 |
+
raise ValueError("Text Encoder not found.")
|
1238 |
+
set_adapter_layers(text_encoder, enabled=True)
|
1239 |
+
|
1240 |
+
def set_adapters(
|
1241 |
+
self,
|
1242 |
+
adapter_names: Union[List[str], str],
|
1243 |
+
adapter_weights: Optional[List[float]] = None,
|
1244 |
+
):
|
1245 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
1246 |
+
# Handle the UNET
|
1247 |
+
unet.set_adapters(adapter_names, adapter_weights)
|
1248 |
+
|
1249 |
+
# Handle the Text Encoder
|
1250 |
+
if hasattr(self, "text_encoder"):
|
1251 |
+
self.set_adapters_for_text_encoder(adapter_names, self.text_encoder, adapter_weights)
|
1252 |
+
if hasattr(self, "text_encoder_2"):
|
1253 |
+
self.set_adapters_for_text_encoder(adapter_names, self.text_encoder_2, adapter_weights)
|
1254 |
+
|
1255 |
+
def disable_lora(self):
|
1256 |
+
if not USE_PEFT_BACKEND:
|
1257 |
+
raise ValueError("PEFT backend is required for this method.")
|
1258 |
+
|
1259 |
+
# Disable unet adapters
|
1260 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
1261 |
+
unet.disable_lora()
|
1262 |
+
|
1263 |
+
# Disable text encoder adapters
|
1264 |
+
if hasattr(self, "text_encoder"):
|
1265 |
+
self.disable_lora_for_text_encoder(self.text_encoder)
|
1266 |
+
if hasattr(self, "text_encoder_2"):
|
1267 |
+
self.disable_lora_for_text_encoder(self.text_encoder_2)
|
1268 |
+
|
1269 |
+
def enable_lora(self):
|
1270 |
+
if not USE_PEFT_BACKEND:
|
1271 |
+
raise ValueError("PEFT backend is required for this method.")
|
1272 |
+
|
1273 |
+
# Enable unet adapters
|
1274 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
1275 |
+
unet.enable_lora()
|
1276 |
+
|
1277 |
+
# Enable text encoder adapters
|
1278 |
+
if hasattr(self, "text_encoder"):
|
1279 |
+
self.enable_lora_for_text_encoder(self.text_encoder)
|
1280 |
+
if hasattr(self, "text_encoder_2"):
|
1281 |
+
self.enable_lora_for_text_encoder(self.text_encoder_2)
|
1282 |
+
|
1283 |
+
def delete_adapters(self, adapter_names: Union[List[str], str]):
|
1284 |
+
"""
|
1285 |
+
Args:
|
1286 |
+
Deletes the LoRA layers of `adapter_name` for the unet and text-encoder(s).
|
1287 |
+
adapter_names (`Union[List[str], str]`):
|
1288 |
+
The names of the adapter to delete. Can be a single string or a list of strings
|
1289 |
+
"""
|
1290 |
+
if not USE_PEFT_BACKEND:
|
1291 |
+
raise ValueError("PEFT backend is required for this method.")
|
1292 |
+
|
1293 |
+
if isinstance(adapter_names, str):
|
1294 |
+
adapter_names = [adapter_names]
|
1295 |
+
|
1296 |
+
# Delete unet adapters
|
1297 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
1298 |
+
unet.delete_adapters(adapter_names)
|
1299 |
+
|
1300 |
+
for adapter_name in adapter_names:
|
1301 |
+
# Delete text encoder adapters
|
1302 |
+
if hasattr(self, "text_encoder"):
|
1303 |
+
delete_adapter_layers(self.text_encoder, adapter_name)
|
1304 |
+
if hasattr(self, "text_encoder_2"):
|
1305 |
+
delete_adapter_layers(self.text_encoder_2, adapter_name)
|
1306 |
+
|
1307 |
+
def get_active_adapters(self) -> List[str]:
|
1308 |
+
"""
|
1309 |
+
Gets the list of the current active adapters.
|
1310 |
+
|
1311 |
+
Example:
|
1312 |
+
|
1313 |
+
```python
|
1314 |
+
from diffusers import DiffusionPipeline
|
1315 |
+
|
1316 |
+
pipeline = DiffusionPipeline.from_pretrained(
|
1317 |
+
"stabilityai/stable-diffusion-xl-base-1.0",
|
1318 |
+
).to("cuda")
|
1319 |
+
pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
|
1320 |
+
pipeline.get_active_adapters()
|
1321 |
+
```
|
1322 |
+
"""
|
1323 |
+
if not USE_PEFT_BACKEND:
|
1324 |
+
raise ValueError(
|
1325 |
+
"PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
|
1326 |
+
)
|
1327 |
+
|
1328 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
1329 |
+
|
1330 |
+
active_adapters = []
|
1331 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
1332 |
+
for module in unet.modules():
|
1333 |
+
if isinstance(module, BaseTunerLayer):
|
1334 |
+
active_adapters = module.active_adapters
|
1335 |
+
break
|
1336 |
+
|
1337 |
+
return active_adapters
|
1338 |
+
|
1339 |
+
def get_list_adapters(self) -> Dict[str, List[str]]:
|
1340 |
+
"""
|
1341 |
+
Gets the current list of all available adapters in the pipeline.
|
1342 |
+
"""
|
1343 |
+
if not USE_PEFT_BACKEND:
|
1344 |
+
raise ValueError(
|
1345 |
+
"PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
|
1346 |
+
)
|
1347 |
+
|
1348 |
+
set_adapters = {}
|
1349 |
+
|
1350 |
+
if hasattr(self, "text_encoder") and hasattr(self.text_encoder, "peft_config"):
|
1351 |
+
set_adapters["text_encoder"] = list(self.text_encoder.peft_config.keys())
|
1352 |
+
|
1353 |
+
if hasattr(self, "text_encoder_2") and hasattr(self.text_encoder_2, "peft_config"):
|
1354 |
+
set_adapters["text_encoder_2"] = list(self.text_encoder_2.peft_config.keys())
|
1355 |
+
|
1356 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
1357 |
+
if hasattr(self, self.unet_name) and hasattr(unet, "peft_config"):
|
1358 |
+
set_adapters[self.unet_name] = list(self.unet.peft_config.keys())
|
1359 |
+
|
1360 |
+
return set_adapters
|
1361 |
+
|
1362 |
+
def set_lora_device(self, adapter_names: List[str], device: Union[torch.device, str, int]) -> None:
|
1363 |
+
"""
|
1364 |
+
Moves the LoRAs listed in `adapter_names` to a target device. Useful for offloading the LoRA to the CPU in case
|
1365 |
+
you want to load multiple adapters and free some GPU memory.
|
1366 |
+
|
1367 |
+
Args:
|
1368 |
+
adapter_names (`List[str]`):
|
1369 |
+
List of adapters to send device to.
|
1370 |
+
device (`Union[torch.device, str, int]`):
|
1371 |
+
Device to send the adapters to. Can be either a torch device, a str or an integer.
|
1372 |
+
"""
|
1373 |
+
if not USE_PEFT_BACKEND:
|
1374 |
+
raise ValueError("PEFT backend is required for this method.")
|
1375 |
+
|
1376 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
1377 |
+
|
1378 |
+
# Handle the UNET
|
1379 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
1380 |
+
for unet_module in unet.modules():
|
1381 |
+
if isinstance(unet_module, BaseTunerLayer):
|
1382 |
+
for adapter_name in adapter_names:
|
1383 |
+
unet_module.lora_A[adapter_name].to(device)
|
1384 |
+
unet_module.lora_B[adapter_name].to(device)
|
1385 |
+
|
1386 |
+
# Handle the text encoder
|
1387 |
+
modules_to_process = []
|
1388 |
+
if hasattr(self, "text_encoder"):
|
1389 |
+
modules_to_process.append(self.text_encoder)
|
1390 |
+
|
1391 |
+
if hasattr(self, "text_encoder_2"):
|
1392 |
+
modules_to_process.append(self.text_encoder_2)
|
1393 |
+
|
1394 |
+
for text_encoder in modules_to_process:
|
1395 |
+
# loop over submodules
|
1396 |
+
for text_encoder_module in text_encoder.modules():
|
1397 |
+
if isinstance(text_encoder_module, BaseTunerLayer):
|
1398 |
+
for adapter_name in adapter_names:
|
1399 |
+
text_encoder_module.lora_A[adapter_name].to(device)
|
1400 |
+
text_encoder_module.lora_B[adapter_name].to(device)
|
1401 |
+
|
1402 |
+
|
1403 |
+
class StableDiffusionXLLoraLoaderMixin(LoraLoaderMixin):
|
1404 |
+
"""This class overrides `LoraLoaderMixin` with LoRA loading/saving code that's specific to SDXL"""
|
1405 |
+
|
1406 |
+
# Override to properly handle the loading and unloading of the additional text encoder.
|
1407 |
+
def load_lora_weights(
|
1408 |
+
self,
|
1409 |
+
pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
|
1410 |
+
adapter_name: Optional[str] = None,
|
1411 |
+
**kwargs,
|
1412 |
+
):
|
1413 |
+
"""
|
1414 |
+
Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
|
1415 |
+
`self.text_encoder`.
|
1416 |
+
|
1417 |
+
All kwargs are forwarded to `self.lora_state_dict`.
|
1418 |
+
|
1419 |
+
See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
|
1420 |
+
|
1421 |
+
See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
|
1422 |
+
`self.unet`.
|
1423 |
+
|
1424 |
+
See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
|
1425 |
+
into `self.text_encoder`.
|
1426 |
+
|
1427 |
+
Parameters:
|
1428 |
+
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
|
1429 |
+
See [`~loaders.LoraLoaderMixin.lora_state_dict`].
|
1430 |
+
adapter_name (`str`, *optional*):
|
1431 |
+
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
|
1432 |
+
`default_{i}` where i is the total number of adapters being loaded.
|
1433 |
+
kwargs (`dict`, *optional*):
|
1434 |
+
See [`~loaders.LoraLoaderMixin.lora_state_dict`].
|
1435 |
+
"""
|
1436 |
+
# We could have accessed the unet config from `lora_state_dict()` too. We pass
|
1437 |
+
# it here explicitly to be able to tell that it's coming from an SDXL
|
1438 |
+
# pipeline.
|
1439 |
+
|
1440 |
+
# First, ensure that the checkpoint is a compatible one and can be successfully loaded.
|
1441 |
+
state_dict, network_alphas = self.lora_state_dict(
|
1442 |
+
pretrained_model_name_or_path_or_dict,
|
1443 |
+
unet_config=self.unet.config,
|
1444 |
+
**kwargs,
|
1445 |
+
)
|
1446 |
+
is_correct_format = all("lora" in key for key in state_dict.keys())
|
1447 |
+
if not is_correct_format:
|
1448 |
+
raise ValueError("Invalid LoRA checkpoint.")
|
1449 |
+
|
1450 |
+
self.load_lora_into_unet(
|
1451 |
+
state_dict, network_alphas=network_alphas, unet=self.unet, adapter_name=adapter_name, _pipeline=self
|
1452 |
+
)
|
1453 |
+
text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
|
1454 |
+
if len(text_encoder_state_dict) > 0:
|
1455 |
+
self.load_lora_into_text_encoder(
|
1456 |
+
text_encoder_state_dict,
|
1457 |
+
network_alphas=network_alphas,
|
1458 |
+
text_encoder=self.text_encoder,
|
1459 |
+
prefix="text_encoder",
|
1460 |
+
lora_scale=self.lora_scale,
|
1461 |
+
adapter_name=adapter_name,
|
1462 |
+
_pipeline=self,
|
1463 |
+
)
|
1464 |
+
|
1465 |
+
text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
|
1466 |
+
if len(text_encoder_2_state_dict) > 0:
|
1467 |
+
self.load_lora_into_text_encoder(
|
1468 |
+
text_encoder_2_state_dict,
|
1469 |
+
network_alphas=network_alphas,
|
1470 |
+
text_encoder=self.text_encoder_2,
|
1471 |
+
prefix="text_encoder_2",
|
1472 |
+
lora_scale=self.lora_scale,
|
1473 |
+
adapter_name=adapter_name,
|
1474 |
+
_pipeline=self,
|
1475 |
+
)
|
1476 |
+
|
1477 |
+
@classmethod
|
1478 |
+
def save_lora_weights(
|
1479 |
+
cls,
|
1480 |
+
save_directory: Union[str, os.PathLike],
|
1481 |
+
unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
1482 |
+
text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
1483 |
+
text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
1484 |
+
is_main_process: bool = True,
|
1485 |
+
weight_name: str = None,
|
1486 |
+
save_function: Callable = None,
|
1487 |
+
safe_serialization: bool = True,
|
1488 |
+
):
|
1489 |
+
r"""
|
1490 |
+
Save the LoRA parameters corresponding to the UNet and text encoder.
|
1491 |
+
|
1492 |
+
Arguments:
|
1493 |
+
save_directory (`str` or `os.PathLike`):
|
1494 |
+
Directory to save LoRA parameters to. Will be created if it doesn't exist.
|
1495 |
+
unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
|
1496 |
+
State dict of the LoRA layers corresponding to the `unet`.
|
1497 |
+
text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
|
1498 |
+
State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
|
1499 |
+
encoder LoRA state dict because it comes from 🤗 Transformers.
|
1500 |
+
is_main_process (`bool`, *optional*, defaults to `True`):
|
1501 |
+
Whether the process calling this is the main process or not. Useful during distributed training and you
|
1502 |
+
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
|
1503 |
+
process to avoid race conditions.
|
1504 |
+
save_function (`Callable`):
|
1505 |
+
The function to use to save the state dictionary. Useful during distributed training when you need to
|
1506 |
+
replace `torch.save` with another method. Can be configured with the environment variable
|
1507 |
+
`DIFFUSERS_SAVE_MODE`.
|
1508 |
+
safe_serialization (`bool`, *optional*, defaults to `True`):
|
1509 |
+
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
|
1510 |
+
"""
|
1511 |
+
state_dict = {}
|
1512 |
+
|
1513 |
+
def pack_weights(layers, prefix):
|
1514 |
+
layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
|
1515 |
+
layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
|
1516 |
+
return layers_state_dict
|
1517 |
+
|
1518 |
+
if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
|
1519 |
+
raise ValueError(
|
1520 |
+
"You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
|
1521 |
+
)
|
1522 |
+
|
1523 |
+
if unet_lora_layers:
|
1524 |
+
state_dict.update(pack_weights(unet_lora_layers, "unet"))
|
1525 |
+
|
1526 |
+
if text_encoder_lora_layers and text_encoder_2_lora_layers:
|
1527 |
+
state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
|
1528 |
+
state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
|
1529 |
+
|
1530 |
+
cls.write_lora_layers(
|
1531 |
+
state_dict=state_dict,
|
1532 |
+
save_directory=save_directory,
|
1533 |
+
is_main_process=is_main_process,
|
1534 |
+
weight_name=weight_name,
|
1535 |
+
save_function=save_function,
|
1536 |
+
safe_serialization=safe_serialization,
|
1537 |
+
)
|
1538 |
+
|
1539 |
+
def _remove_text_encoder_monkey_patch(self):
|
1540 |
+
if USE_PEFT_BACKEND:
|
1541 |
+
recurse_remove_peft_layers(self.text_encoder)
|
1542 |
+
# TODO: @younesbelkada handle this in transformers side
|
1543 |
+
if getattr(self.text_encoder, "peft_config", None) is not None:
|
1544 |
+
del self.text_encoder.peft_config
|
1545 |
+
self.text_encoder._hf_peft_config_loaded = None
|
1546 |
+
|
1547 |
+
recurse_remove_peft_layers(self.text_encoder_2)
|
1548 |
+
if getattr(self.text_encoder_2, "peft_config", None) is not None:
|
1549 |
+
del self.text_encoder_2.peft_config
|
1550 |
+
self.text_encoder_2._hf_peft_config_loaded = None
|
1551 |
+
else:
|
1552 |
+
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
|
1553 |
+
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
|
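Taken together, the loader methods added above form a small adapter-management API on the pipeline. A minimal usage sketch (not part of the diff; it assumes a recent `peft` install so the PEFT backend is active, and reuses the repo IDs and weight names that already appear in the docstring examples above):

```python
import torch
from diffusers import DiffusionPipeline

# Load an SDXL pipeline, attach two LoRA adapters under distinct names, and blend them.
pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
pipeline.set_adapters(["pixel", "toy"], adapter_weights=[0.7, 0.3])
print(pipeline.get_active_adapters())

# Optionally fuse the LoRA weights into the base parameters for faster inference,
# then undo the fusion or drop the adapters entirely.
pipeline.fuse_lora(lora_scale=0.7)
pipeline.unfuse_lora()
pipeline.unload_lora_weights()
```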
diffusers/loaders/lora_conversion_utils.py
ADDED
@@ -0,0 +1,284 @@
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import re
|
16 |
+
|
17 |
+
from ..utils import logging
|
18 |
+
|
19 |
+
|
20 |
+
logger = logging.get_logger(__name__)
|
21 |
+
|
22 |
+
|
23 |
+
def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter="_", block_slice_pos=5):
|
24 |
+
# 1. get all state_dict_keys
|
25 |
+
all_keys = list(state_dict.keys())
|
26 |
+
sgm_patterns = ["input_blocks", "middle_block", "output_blocks"]
|
27 |
+
|
28 |
+
# 2. check if needs remapping, if not return original dict
|
29 |
+
is_in_sgm_format = False
|
30 |
+
for key in all_keys:
|
31 |
+
if any(p in key for p in sgm_patterns):
|
32 |
+
is_in_sgm_format = True
|
33 |
+
break
|
34 |
+
|
35 |
+
if not is_in_sgm_format:
|
36 |
+
return state_dict
|
37 |
+
|
38 |
+
# 3. Else remap from SGM patterns
|
39 |
+
new_state_dict = {}
|
40 |
+
inner_block_map = ["resnets", "attentions", "upsamplers"]
|
41 |
+
|
42 |
+
# Retrieves # of down, mid and up blocks
|
43 |
+
input_block_ids, middle_block_ids, output_block_ids = set(), set(), set()
|
44 |
+
|
45 |
+
for layer in all_keys:
|
46 |
+
if "text" in layer:
|
47 |
+
new_state_dict[layer] = state_dict.pop(layer)
|
48 |
+
else:
|
49 |
+
layer_id = int(layer.split(delimiter)[:block_slice_pos][-1])
|
50 |
+
if sgm_patterns[0] in layer:
|
51 |
+
input_block_ids.add(layer_id)
|
52 |
+
elif sgm_patterns[1] in layer:
|
53 |
+
middle_block_ids.add(layer_id)
|
54 |
+
elif sgm_patterns[2] in layer:
|
55 |
+
output_block_ids.add(layer_id)
|
56 |
+
else:
|
57 |
+
raise ValueError(f"Checkpoint not supported because layer {layer} not supported.")
|
58 |
+
|
59 |
+
input_blocks = {
|
60 |
+
layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key]
|
61 |
+
for layer_id in input_block_ids
|
62 |
+
}
|
63 |
+
middle_blocks = {
|
64 |
+
layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key]
|
65 |
+
for layer_id in middle_block_ids
|
66 |
+
}
|
67 |
+
output_blocks = {
|
68 |
+
layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key]
|
69 |
+
for layer_id in output_block_ids
|
70 |
+
}
|
71 |
+
|
72 |
+
# Rename keys accordingly
|
73 |
+
for i in input_block_ids:
|
74 |
+
block_id = (i - 1) // (unet_config.layers_per_block + 1)
|
75 |
+
layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1)
|
76 |
+
|
77 |
+
for key in input_blocks[i]:
|
78 |
+
inner_block_id = int(key.split(delimiter)[block_slice_pos])
|
79 |
+
inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers"
|
80 |
+
inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0"
|
81 |
+
new_key = delimiter.join(
|
82 |
+
key.split(delimiter)[: block_slice_pos - 1]
|
83 |
+
+ [str(block_id), inner_block_key, inner_layers_in_block]
|
84 |
+
+ key.split(delimiter)[block_slice_pos + 1 :]
|
85 |
+
)
|
86 |
+
new_state_dict[new_key] = state_dict.pop(key)
|
87 |
+
|
88 |
+
for i in middle_block_ids:
|
89 |
+
key_part = None
|
90 |
+
if i == 0:
|
91 |
+
key_part = [inner_block_map[0], "0"]
|
92 |
+
elif i == 1:
|
93 |
+
key_part = [inner_block_map[1], "0"]
|
94 |
+
elif i == 2:
|
95 |
+
key_part = [inner_block_map[0], "1"]
|
96 |
+
else:
|
97 |
+
raise ValueError(f"Invalid middle block id {i}.")
|
98 |
+
|
99 |
+
for key in middle_blocks[i]:
|
100 |
+
new_key = delimiter.join(
|
101 |
+
key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:]
|
102 |
+
)
|
103 |
+
new_state_dict[new_key] = state_dict.pop(key)
|
104 |
+
|
105 |
+
for i in output_block_ids:
|
106 |
+
block_id = i // (unet_config.layers_per_block + 1)
|
107 |
+
layer_in_block_id = i % (unet_config.layers_per_block + 1)
|
108 |
+
|
109 |
+
for key in output_blocks[i]:
|
110 |
+
inner_block_id = int(key.split(delimiter)[block_slice_pos])
|
111 |
+
inner_block_key = inner_block_map[inner_block_id]
|
112 |
+
inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0"
|
113 |
+
new_key = delimiter.join(
|
114 |
+
key.split(delimiter)[: block_slice_pos - 1]
|
115 |
+
+ [str(block_id), inner_block_key, inner_layers_in_block]
|
116 |
+
+ key.split(delimiter)[block_slice_pos + 1 :]
|
117 |
+
)
|
118 |
+
new_state_dict[new_key] = state_dict.pop(key)
|
119 |
+
|
120 |
+
if len(state_dict) > 0:
|
121 |
+
raise ValueError("At this point all state dict entries have to be converted.")
|
122 |
+
|
123 |
+
return new_state_dict
|
124 |
+
|
125 |
+
|
126 |
+
def _convert_kohya_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_name="text_encoder"):
|
127 |
+
unet_state_dict = {}
|
128 |
+
te_state_dict = {}
|
129 |
+
te2_state_dict = {}
|
130 |
+
network_alphas = {}
|
131 |
+
|
132 |
+
# every down weight has a corresponding up weight and potentially an alpha weight
|
133 |
+
lora_keys = [k for k in state_dict.keys() if k.endswith("lora_down.weight")]
|
134 |
+
for key in lora_keys:
|
135 |
+
lora_name = key.split(".")[0]
|
136 |
+
lora_name_up = lora_name + ".lora_up.weight"
|
137 |
+
lora_name_alpha = lora_name + ".alpha"
|
138 |
+
|
139 |
+
if lora_name.startswith("lora_unet_"):
|
140 |
+
diffusers_name = key.replace("lora_unet_", "").replace("_", ".")
|
141 |
+
|
142 |
+
if "input.blocks" in diffusers_name:
|
143 |
+
diffusers_name = diffusers_name.replace("input.blocks", "down_blocks")
|
144 |
+
else:
|
145 |
+
diffusers_name = diffusers_name.replace("down.blocks", "down_blocks")
|
146 |
+
|
147 |
+
if "middle.block" in diffusers_name:
|
148 |
+
diffusers_name = diffusers_name.replace("middle.block", "mid_block")
|
149 |
+
else:
|
150 |
+
diffusers_name = diffusers_name.replace("mid.block", "mid_block")
|
151 |
+
if "output.blocks" in diffusers_name:
|
152 |
+
diffusers_name = diffusers_name.replace("output.blocks", "up_blocks")
|
153 |
+
else:
|
154 |
+
diffusers_name = diffusers_name.replace("up.blocks", "up_blocks")
|
155 |
+
|
156 |
+
diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks")
|
157 |
+
diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora")
|
158 |
+
diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora")
|
159 |
+
diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora")
|
160 |
+
diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora")
|
161 |
+
diffusers_name = diffusers_name.replace("proj.in", "proj_in")
|
162 |
+
diffusers_name = diffusers_name.replace("proj.out", "proj_out")
|
163 |
+
diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj")
|
164 |
+
|
165 |
+
# SDXL specificity.
|
166 |
+
if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name:
|
167 |
+
pattern = r"\.\d+(?=\D*$)"
|
168 |
+
diffusers_name = re.sub(pattern, "", diffusers_name, count=1)
|
169 |
+
if ".in." in diffusers_name:
|
170 |
+
diffusers_name = diffusers_name.replace("in.layers.2", "conv1")
|
171 |
+
if ".out." in diffusers_name:
|
172 |
+
diffusers_name = diffusers_name.replace("out.layers.3", "conv2")
|
173 |
+
if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name:
|
174 |
+
diffusers_name = diffusers_name.replace("op", "conv")
|
175 |
+
if "skip" in diffusers_name:
|
176 |
+
diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut")
|
177 |
+
|
178 |
+
# LyCORIS specificity.
|
179 |
+
if "time.emb.proj" in diffusers_name:
|
180 |
+
diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj")
|
181 |
+
if "conv.shortcut" in diffusers_name:
|
182 |
+
diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut")
|
183 |
+
|
184 |
+
# General coverage.
|
185 |
+
if "transformer_blocks" in diffusers_name:
|
186 |
+
if "attn1" in diffusers_name or "attn2" in diffusers_name:
|
187 |
+
diffusers_name = diffusers_name.replace("attn1", "attn1.processor")
|
188 |
+
diffusers_name = diffusers_name.replace("attn2", "attn2.processor")
|
189 |
+
unet_state_dict[diffusers_name] = state_dict.pop(key)
|
190 |
+
unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
|
191 |
+
elif "ff" in diffusers_name:
|
192 |
+
unet_state_dict[diffusers_name] = state_dict.pop(key)
|
193 |
+
unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
|
194 |
+
elif any(key in diffusers_name for key in ("proj_in", "proj_out")):
|
195 |
+
unet_state_dict[diffusers_name] = state_dict.pop(key)
|
196 |
+
unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
|
197 |
+
else:
|
198 |
+
unet_state_dict[diffusers_name] = state_dict.pop(key)
|
199 |
+
unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
|
200 |
+
|
201 |
+
elif lora_name.startswith("lora_te_"):
|
202 |
+
diffusers_name = key.replace("lora_te_", "").replace("_", ".")
|
203 |
+
diffusers_name = diffusers_name.replace("text.model", "text_model")
|
204 |
+
diffusers_name = diffusers_name.replace("self.attn", "self_attn")
|
205 |
+
diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
|
206 |
+
diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
|
207 |
+
diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
|
208 |
+
diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
|
209 |
+
if "self_attn" in diffusers_name:
|
210 |
+
te_state_dict[diffusers_name] = state_dict.pop(key)
|
211 |
+
te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
|
212 |
+
elif "mlp" in diffusers_name:
|
213 |
+
# Be aware that this is the new diffusers convention and the rest of the code might
|
214 |
+
# not utilize it yet.
|
215 |
+
diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
|
216 |
+
te_state_dict[diffusers_name] = state_dict.pop(key)
|
217 |
+
te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
|
218 |
+
|
219 |
+
# (sayakpaul): Duplicate code. Needs to be cleaned.
|
220 |
+
elif lora_name.startswith("lora_te1_"):
|
221 |
+
diffusers_name = key.replace("lora_te1_", "").replace("_", ".")
|
222 |
+
diffusers_name = diffusers_name.replace("text.model", "text_model")
|
223 |
+
diffusers_name = diffusers_name.replace("self.attn", "self_attn")
|
224 |
+
diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
|
225 |
+
diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
|
226 |
+
diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
|
227 |
+
diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
|
228 |
+
if "self_attn" in diffusers_name:
|
229 |
+
te_state_dict[diffusers_name] = state_dict.pop(key)
|
230 |
+
te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
|
231 |
+
elif "mlp" in diffusers_name:
|
232 |
+
# Be aware that this is the new diffusers convention and the rest of the code might
|
233 |
+
# not utilize it yet.
|
234 |
+
diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
|
235 |
+
te_state_dict[diffusers_name] = state_dict.pop(key)
|
236 |
+
te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
|
237 |
+
|
238 |
+
# (sayakpaul): Duplicate code. Needs to be cleaned.
|
239 |
+
elif lora_name.startswith("lora_te2_"):
|
240 |
+
diffusers_name = key.replace("lora_te2_", "").replace("_", ".")
|
241 |
+
diffusers_name = diffusers_name.replace("text.model", "text_model")
|
242 |
+
diffusers_name = diffusers_name.replace("self.attn", "self_attn")
|
243 |
+
diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
|
244 |
+
diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
|
245 |
+
diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
|
246 |
+
diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
|
247 |
+
if "self_attn" in diffusers_name:
|
248 |
+
te2_state_dict[diffusers_name] = state_dict.pop(key)
|
249 |
+
te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
|
250 |
+
elif "mlp" in diffusers_name:
|
251 |
+
# Be aware that this is the new diffusers convention and the rest of the code might
|
252 |
+
# not utilize it yet.
|
253 |
+
diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
|
254 |
+
te2_state_dict[diffusers_name] = state_dict.pop(key)
|
255 |
+
te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
|
256 |
+
|
257 |
+
# Rename the alphas so that they can be mapped appropriately.
|
258 |
+
if lora_name_alpha in state_dict:
|
259 |
+
alpha = state_dict.pop(lora_name_alpha).item()
|
260 |
+
if lora_name_alpha.startswith("lora_unet_"):
|
261 |
+
prefix = "unet."
|
262 |
+
elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")):
|
263 |
+
prefix = "text_encoder."
|
264 |
+
else:
|
265 |
+
prefix = "text_encoder_2."
|
266 |
+
new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha"
|
267 |
+
network_alphas.update({new_name: alpha})
|
268 |
+
|
269 |
+
if len(state_dict) > 0:
|
270 |
+
raise ValueError(f"The following keys have not been correctly be renamed: \n\n {', '.join(state_dict.keys())}")
|
271 |
+
|
272 |
+
logger.info("Kohya-style checkpoint detected.")
|
273 |
+
unet_state_dict = {f"{unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()}
|
274 |
+
te_state_dict = {f"{text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()}
|
275 |
+
te2_state_dict = (
|
276 |
+
{f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()}
|
277 |
+
if len(te2_state_dict) > 0
|
278 |
+
else None
|
279 |
+
)
|
280 |
+
if te2_state_dict is not None:
|
281 |
+
te_state_dict.update(te2_state_dict)
|
282 |
+
|
283 |
+
new_state_dict = {**unet_state_dict, **te_state_dict}
|
284 |
+
return new_state_dict, network_alphas
|
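The conversion helper above is normally called internally by `lora_state_dict`, but it can be exercised directly on a Kohya-style checkpoint. A hedged sketch (the checkpoint path is a placeholder, not a file shipped with this Space, and the import path assumes the vendored `diffusers` package added by this commit):

```python
import safetensors.torch

from diffusers.loaders.lora_conversion_utils import _convert_kohya_lora_to_diffusers

# Load a Kohya-trained LoRA state dict from disk (placeholder filename).
kohya_state_dict = safetensors.torch.load_file("my_kohya_lora.safetensors")

# Returns a diffusers-style state dict (keys prefixed with "unet." / "text_encoder.")
# plus the per-module network alphas recovered from the ".alpha" entries.
diffusers_state_dict, network_alphas = _convert_kohya_lora_to_diffusers(kohya_state_dict)
print(len(diffusers_state_dict), len(network_alphas))
```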
diffusers/loaders/peft.py
ADDED
@@ -0,0 +1,188 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union

from ..utils import MIN_PEFT_VERSION, check_peft_version, is_peft_available


class PeftAdapterMixin:
    """
    A class containing all functions for loading and using adapters weights that are supported in PEFT library. For
    more details about adapters and injecting them on a transformer-based model, check out the documentation of PEFT
    library: https://huggingface.co/docs/peft/index.

    With this mixin, if the correct PEFT version is installed, it is possible to:

    - Attach new adapters in the model.
    - Attach multiple adapters and iteratively activate / deactivate them.
    - Activate / deactivate all adapters from the model.
    - Get a list of the active adapters.
    """

    _hf_peft_config_loaded = False

    def add_adapter(self, adapter_config, adapter_name: str = "default") -> None:
        r"""
        Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned
        to the adapter to follow the convention of the PEFT library.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT
        [documentation](https://huggingface.co/docs/peft).

        Args:
            adapter_config (`[~peft.PeftConfig]`):
                The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt
                methods.
            adapter_name (`str`, *optional*, defaults to `"default"`):
                The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not is_peft_available():
            raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.")

        from peft import PeftConfig, inject_adapter_in_model

        if not self._hf_peft_config_loaded:
            self._hf_peft_config_loaded = True
        elif adapter_name in self.peft_config:
            raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")

        if not isinstance(adapter_config, PeftConfig):
            raise ValueError(
                f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead."
            )

        # Unlike transformers, here we don't need to retrieve the name_or_path of the unet as the loading logic is
        # handled by the `load_lora_layers` or `LoraLoaderMixin`. Therefore we set it to `None` here.
        adapter_config.base_model_name_or_path = None
        inject_adapter_in_model(adapter_config, self, adapter_name)
        self.set_adapter(adapter_name)

    def set_adapter(self, adapter_name: Union[str, List[str]]) -> None:
        """
        Sets a specific adapter by forcing the model to only use that adapter and disables the other adapters.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        official documentation: https://huggingface.co/docs/peft

        Args:
            adapter_name (Union[str, List[str]]):
                The list of adapters to set or the adapter name in case of single adapter.
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        if isinstance(adapter_name, str):
            adapter_name = [adapter_name]

        missing = set(adapter_name) - set(self.peft_config)
        if len(missing) > 0:
            raise ValueError(
                f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)."
                f" current loaded adapters are: {list(self.peft_config.keys())}"
            )

        from peft.tuners.tuners_utils import BaseTunerLayer

        _adapters_has_been_set = False

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                if hasattr(module, "set_adapter"):
                    module.set_adapter(adapter_name)
                # Previous versions of PEFT do not support multi-adapter inference
                elif not hasattr(module, "set_adapter") and len(adapter_name) != 1:
                    raise ValueError(
                        "You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT."
                        " `pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`"
                    )
                else:
                    module.active_adapter = adapter_name
                _adapters_has_been_set = True

        if not _adapters_has_been_set:
            raise ValueError(
                "Did not succeed in setting the adapter. Please make sure you are using a model that supports adapters."
            )

    def disable_adapters(self) -> None:
        r"""
        Disable all adapters attached to the model and fallback to inference with the base model only.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        official documentation: https://huggingface.co/docs/peft
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                if hasattr(module, "enable_adapters"):
                    module.enable_adapters(enabled=False)
                else:
                    # support for older PEFT versions
                    module.disable_adapters = True

    def enable_adapters(self) -> None:
        """
        Enable adapters that are attached to the model. The model will use `self.active_adapters()` to retrieve the
        list of adapters to enable.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        official documentation: https://huggingface.co/docs/peft
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                if hasattr(module, "enable_adapters"):
                    module.enable_adapters(enabled=True)
                else:
                    # support for older PEFT versions
                    module.disable_adapters = False

    def active_adapters(self) -> List[str]:
        """
        Gets the current list of active adapters of the model.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        official documentation: https://huggingface.co/docs/peft
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not is_peft_available():
            raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.")

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                return module.active_adapter
diffusers/loaders/single_file.py
ADDED
@@ -0,0 +1,637 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from contextlib import nullcontext
|
15 |
+
from io import BytesIO
|
16 |
+
from pathlib import Path
|
17 |
+
|
18 |
+
import requests
|
19 |
+
import torch
|
20 |
+
from huggingface_hub import hf_hub_download
|
21 |
+
from huggingface_hub.utils import validate_hf_hub_args
|
22 |
+
|
23 |
+
from ..utils import (
|
24 |
+
deprecate,
|
25 |
+
is_accelerate_available,
|
26 |
+
is_omegaconf_available,
|
27 |
+
is_transformers_available,
|
28 |
+
logging,
|
29 |
+
)
|
30 |
+
from ..utils.import_utils import BACKENDS_MAPPING
|
31 |
+
|
32 |
+
|
33 |
+
if is_transformers_available():
|
34 |
+
pass
|
35 |
+
|
36 |
+
if is_accelerate_available():
|
37 |
+
from accelerate import init_empty_weights
|
38 |
+
|
39 |
+
logger = logging.get_logger(__name__)
|
40 |
+
|
41 |
+
|
42 |
+
class FromSingleFileMixin:
|
43 |
+
"""
|
44 |
+
Load model weights saved in the `.ckpt` format into a [`DiffusionPipeline`].
|
45 |
+
"""
|
46 |
+
|
47 |
+
@classmethod
|
48 |
+
def from_ckpt(cls, *args, **kwargs):
|
49 |
+
deprecation_message = "The function `from_ckpt` is deprecated in favor of `from_single_file` and will be removed in diffusers v.0.21. Please make sure to use `StableDiffusionPipeline.from_single_file(...)` instead."
|
50 |
+
deprecate("from_ckpt", "0.21.0", deprecation_message, standard_warn=False)
|
51 |
+
return cls.from_single_file(*args, **kwargs)
|
52 |
+
|
53 |
+
@classmethod
|
54 |
+
@validate_hf_hub_args
|
55 |
+
def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
|
56 |
+
r"""
|
57 |
+
Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors`
|
58 |
+
format. The pipeline is set in evaluation mode (`model.eval()`) by default.
|
59 |
+
|
60 |
+
Parameters:
|
61 |
+
pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
|
62 |
+
Can be either:
|
63 |
+
- A link to the `.ckpt` file (for example
|
64 |
+
`"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
|
65 |
+
- A path to a *file* containing all pipeline weights.
|
66 |
+
torch_dtype (`str` or `torch.dtype`, *optional*):
|
67 |
+
Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
|
68 |
+
dtype is automatically derived from the model's weights.
|
69 |
+
force_download (`bool`, *optional*, defaults to `False`):
|
70 |
+
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
71 |
+
cached versions if they exist.
|
72 |
+
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
73 |
+
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
|
74 |
+
is not used.
|
75 |
+
resume_download (`bool`, *optional*, defaults to `False`):
|
76 |
+
Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
|
77 |
+
incompletely downloaded files are deleted.
|
78 |
+
proxies (`Dict[str, str]`, *optional*):
|
79 |
+
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
80 |
+
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
81 |
+
local_files_only (`bool`, *optional*, defaults to `False`):
|
82 |
+
Whether to only load local model weights and configuration files or not. If set to `True`, the model
|
83 |
+
won't be downloaded from the Hub.
|
84 |
+
token (`str` or *bool*, *optional*):
|
85 |
+
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
|
86 |
+
`diffusers-cli login` (stored in `~/.huggingface`) is used.
|
87 |
+
revision (`str`, *optional*, defaults to `"main"`):
|
88 |
+
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
|
89 |
+
allowed by Git.
|
90 |
+
use_safetensors (`bool`, *optional*, defaults to `None`):
|
91 |
+
If set to `None`, the safetensors weights are downloaded if they're available **and** if the
|
92 |
+
safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
|
93 |
+
weights. If set to `False`, safetensors weights are not loaded.
|
94 |
+
extract_ema (`bool`, *optional*, defaults to `False`):
|
95 |
+
Whether to extract the EMA weights or not. Pass `True` to extract the EMA weights which usually yield
|
96 |
+
higher quality images for inference. Non-EMA weights are usually better for continuing finetuning.
|
97 |
+
upcast_attention (`bool`, *optional*, defaults to `None`):
|
98 |
+
Whether the attention computation should always be upcasted.
|
99 |
+
image_size (`int`, *optional*, defaults to 512):
|
100 |
+
The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
|
101 |
+
Diffusion v2 base model. Use 768 for Stable Diffusion v2.
|
102 |
+
prediction_type (`str`, *optional*):
|
103 |
+
The prediction type the model was trained on. Use `'epsilon'` for all Stable Diffusion v1 models and
|
104 |
+
the Stable Diffusion v2 base model. Use `'v_prediction'` for Stable Diffusion v2.
|
105 |
+
num_in_channels (`int`, *optional*, defaults to `None`):
|
106 |
+
The number of input channels. If `None`, it is automatically inferred.
|
107 |
+
scheduler_type (`str`, *optional*, defaults to `"pndm"`):
|
108 |
+
Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
|
109 |
+
"ddim"]`.
|
110 |
+
load_safety_checker (`bool`, *optional*, defaults to `True`):
|
111 |
+
Whether to load the safety checker or not.
|
112 |
+
text_encoder ([`~transformers.CLIPTextModel`], *optional*, defaults to `None`):
|
113 |
+
An instance of `CLIPTextModel` to use, specifically the
|
114 |
+
[clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. If this
|
115 |
+
parameter is `None`, the function loads a new instance of `CLIPTextModel` by itself if needed.
|
116 |
+
vae (`AutoencoderKL`, *optional*, defaults to `None`):
|
117 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If
|
118 |
+
this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed.
|
119 |
+
tokenizer ([`~transformers.CLIPTokenizer`], *optional*, defaults to `None`):
|
120 |
+
An instance of `CLIPTokenizer` to use. If this parameter is `None`, the function loads a new instance
|
121 |
+
of `CLIPTokenizer` by itself if needed.
|
122 |
+
original_config_file (`str`):
|
123 |
+
Path to `.yaml` config file corresponding to the original architecture. If `None`, will be
|
124 |
+
automatically inferred by looking for a key that only exists in SD2.0 models.
|
125 |
+
kwargs (remaining dictionary of keyword arguments, *optional*):
|
126 |
+
Can be used to overwrite load and saveable variables (for example the pipeline components of the
|
127 |
+
specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
|
128 |
+
method. See example below for more information.
|
129 |
+
|
130 |
+
Examples:
|
131 |
+
|
132 |
+
```py
|
133 |
+
>>> from diffusers import StableDiffusionPipeline
|
134 |
+
|
135 |
+
>>> # Download pipeline from huggingface.co and cache.
|
136 |
+
>>> pipeline = StableDiffusionPipeline.from_single_file(
|
137 |
+
... "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors"
|
138 |
+
... )
|
139 |
+
|
140 |
+
>>> # Download pipeline from local file
|
141 |
+
>>> # file is downloaded under ./v1-5-pruned-emaonly.ckpt
|
142 |
+
>>> pipeline = StableDiffusionPipeline.from_single_file("./v1-5-pruned-emaonly")
|
143 |
+
|
144 |
+
>>> # Enable float16 and move to GPU
|
145 |
+
>>> pipeline = StableDiffusionPipeline.from_single_file(
|
146 |
+
... "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
|
147 |
+
... torch_dtype=torch.float16,
|
148 |
+
... )
|
149 |
+
>>> pipeline.to("cuda")
|
150 |
+
```
|
151 |
+
"""
|
152 |
+
# import here to avoid circular dependency
|
153 |
+
from ..pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
|
154 |
+
|
155 |
+
original_config_file = kwargs.pop("original_config_file", None)
|
156 |
+
config_files = kwargs.pop("config_files", None)
|
157 |
+
cache_dir = kwargs.pop("cache_dir", None)
|
158 |
+
resume_download = kwargs.pop("resume_download", False)
|
159 |
+
force_download = kwargs.pop("force_download", False)
|
160 |
+
proxies = kwargs.pop("proxies", None)
|
161 |
+
local_files_only = kwargs.pop("local_files_only", None)
|
162 |
+
token = kwargs.pop("token", None)
|
163 |
+
revision = kwargs.pop("revision", None)
|
164 |
+
extract_ema = kwargs.pop("extract_ema", False)
|
165 |
+
image_size = kwargs.pop("image_size", None)
|
166 |
+
scheduler_type = kwargs.pop("scheduler_type", "pndm")
|
167 |
+
num_in_channels = kwargs.pop("num_in_channels", None)
|
168 |
+
upcast_attention = kwargs.pop("upcast_attention", None)
|
169 |
+
load_safety_checker = kwargs.pop("load_safety_checker", True)
|
170 |
+
prediction_type = kwargs.pop("prediction_type", None)
|
171 |
+
text_encoder = kwargs.pop("text_encoder", None)
|
172 |
+
text_encoder_2 = kwargs.pop("text_encoder_2", None)
|
173 |
+
vae = kwargs.pop("vae", None)
|
174 |
+
controlnet = kwargs.pop("controlnet", None)
|
175 |
+
adapter = kwargs.pop("adapter", None)
|
176 |
+
tokenizer = kwargs.pop("tokenizer", None)
|
177 |
+
tokenizer_2 = kwargs.pop("tokenizer_2", None)
|
178 |
+
|
179 |
+
torch_dtype = kwargs.pop("torch_dtype", None)
|
180 |
+
|
181 |
+
use_safetensors = kwargs.pop("use_safetensors", None)
|
182 |
+
|
183 |
+
pipeline_name = cls.__name__
|
184 |
+
file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
|
185 |
+
from_safetensors = file_extension == "safetensors"
|
186 |
+
|
187 |
+
if from_safetensors and use_safetensors is False:
|
188 |
+
raise ValueError("Make sure to install `safetensors` with `pip install safetensors`.")
|
189 |
+
|
190 |
+
# TODO: For now we only support stable diffusion
|
191 |
+
stable_unclip = None
|
192 |
+
model_type = None
|
193 |
+
|
194 |
+
if pipeline_name in [
|
195 |
+
"StableDiffusionControlNetPipeline",
|
196 |
+
"StableDiffusionControlNetImg2ImgPipeline",
|
197 |
+
"StableDiffusionControlNetInpaintPipeline",
|
198 |
+
]:
|
199 |
+
from ..models.controlnet import ControlNetModel
|
200 |
+
from ..pipelines.controlnet.multicontrolnet import MultiControlNetModel
|
201 |
+
|
202 |
+
# list/tuple or a single instance of ControlNetModel or MultiControlNetModel
|
203 |
+
if not (
|
204 |
+
isinstance(controlnet, (ControlNetModel, MultiControlNetModel))
|
205 |
+
or isinstance(controlnet, (list, tuple))
|
206 |
+
and isinstance(controlnet[0], ControlNetModel)
|
207 |
+
):
|
208 |
+
raise ValueError("ControlNet needs to be passed if loading from ControlNet pipeline.")
|
209 |
+
elif "StableDiffusion" in pipeline_name:
|
210 |
+
# Model type will be inferred from the checkpoint.
|
211 |
+
pass
|
212 |
+
elif pipeline_name == "StableUnCLIPPipeline":
|
213 |
+
model_type = "FrozenOpenCLIPEmbedder"
|
214 |
+
stable_unclip = "txt2img"
|
215 |
+
elif pipeline_name == "StableUnCLIPImg2ImgPipeline":
|
216 |
+
model_type = "FrozenOpenCLIPEmbedder"
|
217 |
+
stable_unclip = "img2img"
|
218 |
+
elif pipeline_name == "PaintByExamplePipeline":
|
219 |
+
model_type = "PaintByExample"
|
220 |
+
elif pipeline_name == "LDMTextToImagePipeline":
|
221 |
+
model_type = "LDMTextToImage"
|
222 |
+
else:
|
223 |
+
raise ValueError(f"Unhandled pipeline class: {pipeline_name}")
|
224 |
+
|
225 |
+
# remove huggingface url
|
226 |
+
has_valid_url_prefix = False
|
227 |
+
valid_url_prefixes = ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]
|
228 |
+
for prefix in valid_url_prefixes:
|
229 |
+
if pretrained_model_link_or_path.startswith(prefix):
|
230 |
+
pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
|
231 |
+
has_valid_url_prefix = True
|
232 |
+
|
233 |
+
# Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
|
234 |
+
ckpt_path = Path(pretrained_model_link_or_path)
|
235 |
+
if not ckpt_path.is_file():
|
236 |
+
if not has_valid_url_prefix:
|
237 |
+
raise ValueError(
|
238 |
+
f"The provided path is either not a file or a valid huggingface URL was not provided. Valid URLs begin with {', '.join(valid_url_prefixes)}"
|
239 |
+
)
|
240 |
+
|
241 |
+
# get repo_id and (potentially nested) file path of ckpt in repo
|
242 |
+
repo_id = "/".join(ckpt_path.parts[:2])
|
243 |
+
file_path = "/".join(ckpt_path.parts[2:])
|
244 |
+
|
245 |
+
if file_path.startswith("blob/"):
|
246 |
+
file_path = file_path[len("blob/") :]
|
247 |
+
|
248 |
+
if file_path.startswith("main/"):
|
249 |
+
file_path = file_path[len("main/") :]
|
250 |
+
|
251 |
+
pretrained_model_link_or_path = hf_hub_download(
|
252 |
+
repo_id,
|
253 |
+
filename=file_path,
|
254 |
+
cache_dir=cache_dir,
|
255 |
+
resume_download=resume_download,
|
256 |
+
proxies=proxies,
|
257 |
+
local_files_only=local_files_only,
|
258 |
+
token=token,
|
259 |
+
revision=revision,
|
260 |
+
force_download=force_download,
|
261 |
+
)
|
262 |
+
|
263 |
+
pipe = download_from_original_stable_diffusion_ckpt(
|
264 |
+
pretrained_model_link_or_path,
|
265 |
+
pipeline_class=cls,
|
266 |
+
model_type=model_type,
|
267 |
+
stable_unclip=stable_unclip,
|
268 |
+
controlnet=controlnet,
|
269 |
+
adapter=adapter,
|
270 |
+
from_safetensors=from_safetensors,
|
271 |
+
extract_ema=extract_ema,
|
272 |
+
image_size=image_size,
|
273 |
+
scheduler_type=scheduler_type,
|
274 |
+
num_in_channels=num_in_channels,
|
275 |
+
upcast_attention=upcast_attention,
|
276 |
+
load_safety_checker=load_safety_checker,
|
277 |
+
prediction_type=prediction_type,
|
278 |
+
text_encoder=text_encoder,
|
279 |
+
text_encoder_2=text_encoder_2,
|
280 |
+
vae=vae,
|
281 |
+
tokenizer=tokenizer,
|
282 |
+
tokenizer_2=tokenizer_2,
|
283 |
+
original_config_file=original_config_file,
|
284 |
+
config_files=config_files,
|
285 |
+
local_files_only=local_files_only,
|
286 |
+
)
|
287 |
+
|
288 |
+
if torch_dtype is not None:
|
289 |
+
pipe.to(dtype=torch_dtype)
|
290 |
+
|
291 |
+
return pipe
|
292 |
+
|
293 |
+
|
294 |
+
class FromOriginalVAEMixin:
|
295 |
+
"""
|
296 |
+
Load pretrained ControlNet weights saved in the `.ckpt` or `.safetensors` format into an [`AutoencoderKL`].
|
297 |
+
"""
|
298 |
+
|
299 |
+
@classmethod
|
300 |
+
@validate_hf_hub_args
|
301 |
+
def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
|
302 |
+
r"""
|
303 |
+
Instantiate a [`AutoencoderKL`] from pretrained ControlNet weights saved in the original `.ckpt` or
|
304 |
+
`.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default.
|
305 |
+
|
306 |
+
Parameters:
|
307 |
+
pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
|
308 |
+
Can be either:
|
309 |
+
- A link to the `.ckpt` file (for example
|
310 |
+
`"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
|
311 |
+
- A path to a *file* containing all pipeline weights.
|
312 |
+
torch_dtype (`str` or `torch.dtype`, *optional*):
|
313 |
+
Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
|
314 |
+
dtype is automatically derived from the model's weights.
|
315 |
+
force_download (`bool`, *optional*, defaults to `False`):
|
316 |
+
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
317 |
+
cached versions if they exist.
|
318 |
+
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
319 |
+
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
|
320 |
+
is not used.
|
321 |
+
resume_download (`bool`, *optional*, defaults to `False`):
|
322 |
+
Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
|
323 |
+
incompletely downloaded files are deleted.
|
324 |
+
proxies (`Dict[str, str]`, *optional*):
|
325 |
+
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
326 |
+
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
327 |
+
local_files_only (`bool`, *optional*, defaults to `False`):
|
328 |
+
Whether to only load local model weights and configuration files or not. If set to True, the model
|
329 |
+
won't be downloaded from the Hub.
|
330 |
+
token (`str` or *bool*, *optional*):
|
331 |
+
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
|
332 |
+
`diffusers-cli login` (stored in `~/.huggingface`) is used.
|
333 |
+
revision (`str`, *optional*, defaults to `"main"`):
|
334 |
+
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
|
335 |
+
allowed by Git.
|
336 |
+
image_size (`int`, *optional*, defaults to 512):
|
337 |
+
The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
|
338 |
+
Diffusion v2 base model. Use 768 for Stable Diffusion v2.
|
339 |
+
use_safetensors (`bool`, *optional*, defaults to `None`):
|
340 |
+
If set to `None`, the safetensors weights are downloaded if they're available **and** if the
|
341 |
+
safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
|
342 |
+
weights. If set to `False`, safetensors weights are not loaded.
|
343 |
+
upcast_attention (`bool`, *optional*, defaults to `None`):
|
344 |
+
Whether the attention computation should always be upcasted.
|
345 |
+
scaling_factor (`float`, *optional*, defaults to 0.18215):
|
346 |
+
The component-wise standard deviation of the trained latent space computed using the first batch of the
|
347 |
+
training set. This is used to scale the latent space to have unit variance when training the diffusion
|
348 |
+
model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
|
349 |
+
diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z
|
350 |
+
= 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution
|
351 |
+
Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
|
352 |
+
kwargs (remaining dictionary of keyword arguments, *optional*):
|
353 |
+
Can be used to overwrite load and saveable variables (for example the pipeline components of the
|
354 |
+
specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
|
355 |
+
method. See example below for more information.
|
356 |
+
|
357 |
+
<Tip warning={true}>
|
358 |
+
|
359 |
+
Make sure to pass both `image_size` and `scaling_factor` to `from_single_file()` if you're loading
|
360 |
+
a VAE from SDXL or a Stable Diffusion v2 model or higher.
|
361 |
+
|
362 |
+
</Tip>
|
363 |
+
|
364 |
+
Examples:
|
365 |
+
|
366 |
+
```py
|
367 |
+
from diffusers import AutoencoderKL
|
368 |
+
|
369 |
+
url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" # can also be local file
|
370 |
+
model = AutoencoderKL.from_single_file(url)
|
371 |
+
```
|
372 |
+
"""
|
373 |
+
if not is_omegaconf_available():
|
374 |
+
raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
|
375 |
+
|
376 |
+
from omegaconf import OmegaConf
|
377 |
+
|
378 |
+
from ..models import AutoencoderKL
|
379 |
+
|
380 |
+
# import here to avoid circular dependency
|
381 |
+
from ..pipelines.stable_diffusion.convert_from_ckpt import (
|
382 |
+
convert_ldm_vae_checkpoint,
|
383 |
+
create_vae_diffusers_config,
|
384 |
+
)
|
385 |
+
|
386 |
+
config_file = kwargs.pop("config_file", None)
|
387 |
+
cache_dir = kwargs.pop("cache_dir", None)
|
388 |
+
resume_download = kwargs.pop("resume_download", False)
|
389 |
+
force_download = kwargs.pop("force_download", False)
|
390 |
+
proxies = kwargs.pop("proxies", None)
|
391 |
+
local_files_only = kwargs.pop("local_files_only", None)
|
392 |
+
token = kwargs.pop("token", None)
|
393 |
+
revision = kwargs.pop("revision", None)
|
394 |
+
image_size = kwargs.pop("image_size", None)
|
395 |
+
scaling_factor = kwargs.pop("scaling_factor", None)
|
396 |
+
kwargs.pop("upcast_attention", None)
|
397 |
+
|
398 |
+
torch_dtype = kwargs.pop("torch_dtype", None)
|
399 |
+
|
400 |
+
use_safetensors = kwargs.pop("use_safetensors", None)
|
401 |
+
|
402 |
+
file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
|
403 |
+
from_safetensors = file_extension == "safetensors"
|
404 |
+
|
405 |
+
if from_safetensors and use_safetensors is False:
|
406 |
+
raise ValueError("Make sure to install `safetensors` with `pip install safetensors`.")
|
407 |
+
|
408 |
+
# remove huggingface url
|
409 |
+
for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
|
410 |
+
if pretrained_model_link_or_path.startswith(prefix):
|
411 |
+
pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
|
412 |
+
|
413 |
+
# Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
|
414 |
+
ckpt_path = Path(pretrained_model_link_or_path)
|
415 |
+
if not ckpt_path.is_file():
|
416 |
+
# get repo_id and (potentially nested) file path of ckpt in repo
|
417 |
+
repo_id = "/".join(ckpt_path.parts[:2])
|
418 |
+
file_path = "/".join(ckpt_path.parts[2:])
|
419 |
+
|
420 |
+
if file_path.startswith("blob/"):
|
421 |
+
file_path = file_path[len("blob/") :]
|
422 |
+
|
423 |
+
if file_path.startswith("main/"):
|
424 |
+
file_path = file_path[len("main/") :]
|
425 |
+
|
426 |
+
pretrained_model_link_or_path = hf_hub_download(
|
427 |
+
repo_id,
|
428 |
+
filename=file_path,
|
429 |
+
cache_dir=cache_dir,
|
430 |
+
resume_download=resume_download,
|
431 |
+
proxies=proxies,
|
432 |
+
local_files_only=local_files_only,
|
433 |
+
token=token,
|
434 |
+
revision=revision,
|
435 |
+
force_download=force_download,
|
436 |
+
)
|
437 |
+
|
438 |
+
if from_safetensors:
|
439 |
+
from safetensors import safe_open
|
440 |
+
|
441 |
+
checkpoint = {}
|
442 |
+
with safe_open(pretrained_model_link_or_path, framework="pt", device="cpu") as f:
|
443 |
+
for key in f.keys():
|
444 |
+
checkpoint[key] = f.get_tensor(key)
|
445 |
+
else:
|
446 |
+
checkpoint = torch.load(pretrained_model_link_or_path, map_location="cpu")
|
447 |
+
|
448 |
+
if "state_dict" in checkpoint:
|
449 |
+
checkpoint = checkpoint["state_dict"]
|
450 |
+
|
451 |
+
if config_file is None:
|
452 |
+
config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
|
453 |
+
config_file = BytesIO(requests.get(config_url).content)
|
454 |
+
|
455 |
+
original_config = OmegaConf.load(config_file)
|
456 |
+
|
457 |
+
# default to sd-v1-5
|
458 |
+
image_size = image_size or 512
|
459 |
+
|
460 |
+
vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
|
461 |
+
converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)
|
462 |
+
|
463 |
+
if scaling_factor is None:
|
464 |
+
if (
|
465 |
+
"model" in original_config
|
466 |
+
and "params" in original_config.model
|
467 |
+
and "scale_factor" in original_config.model.params
|
468 |
+
):
|
469 |
+
vae_scaling_factor = original_config.model.params.scale_factor
|
470 |
+
else:
|
471 |
+
vae_scaling_factor = 0.18215 # default SD scaling factor
|
472 |
+
|
473 |
+
vae_config["scaling_factor"] = vae_scaling_factor
|
474 |
+
|
475 |
+
ctx = init_empty_weights if is_accelerate_available() else nullcontext
|
476 |
+
with ctx():
|
477 |
+
vae = AutoencoderKL(**vae_config)
|
478 |
+
|
479 |
+
if is_accelerate_available():
|
480 |
+
from ..models.modeling_utils import load_model_dict_into_meta
|
481 |
+
|
482 |
+
load_model_dict_into_meta(vae, converted_vae_checkpoint, device="cpu")
|
483 |
+
else:
|
484 |
+
vae.load_state_dict(converted_vae_checkpoint)
|
485 |
+
|
486 |
+
if torch_dtype is not None:
|
487 |
+
vae.to(dtype=torch_dtype)
|
488 |
+
|
489 |
+
return vae
|
490 |
+
|
491 |
+
|
492 |
+
class FromOriginalControlnetMixin:
|
493 |
+
"""
|
494 |
+
Load pretrained ControlNet weights saved in the `.ckpt` or `.safetensors` format into a [`ControlNetModel`].
|
495 |
+
"""
|
496 |
+
|
497 |
+
@classmethod
|
498 |
+
@validate_hf_hub_args
|
499 |
+
def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
|
500 |
+
r"""
|
501 |
+
Instantiate a [`ControlNetModel`] from pretrained ControlNet weights saved in the original `.ckpt` or
|
502 |
+
`.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default.
|
503 |
+
|
504 |
+
Parameters:
|
505 |
+
pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
|
506 |
+
Can be either:
|
507 |
+
- A link to the `.ckpt` file (for example
|
508 |
+
`"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
|
509 |
+
- A path to a *file* containing all pipeline weights.
|
510 |
+
torch_dtype (`str` or `torch.dtype`, *optional*):
|
511 |
+
Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
|
512 |
+
dtype is automatically derived from the model's weights.
|
513 |
+
force_download (`bool`, *optional*, defaults to `False`):
|
514 |
+
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
515 |
+
cached versions if they exist.
|
516 |
+
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
517 |
+
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
|
518 |
+
is not used.
|
519 |
+
resume_download (`bool`, *optional*, defaults to `False`):
|
520 |
+
Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
|
521 |
+
incompletely downloaded files are deleted.
|
522 |
+
proxies (`Dict[str, str]`, *optional*):
|
523 |
+
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
524 |
+
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
525 |
+
local_files_only (`bool`, *optional*, defaults to `False`):
|
526 |
+
Whether to only load local model weights and configuration files or not. If set to True, the model
|
527 |
+
won't be downloaded from the Hub.
|
528 |
+
token (`str` or *bool*, *optional*):
|
529 |
+
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
|
530 |
+
`diffusers-cli login` (stored in `~/.huggingface`) is used.
|
531 |
+
revision (`str`, *optional*, defaults to `"main"`):
|
532 |
+
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
|
533 |
+
allowed by Git.
|
534 |
+
use_safetensors (`bool`, *optional*, defaults to `None`):
|
535 |
+
If set to `None`, the safetensors weights are downloaded if they're available **and** if the
|
536 |
+
safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
|
537 |
+
weights. If set to `False`, safetensors weights are not loaded.
|
538 |
+
image_size (`int`, *optional*, defaults to 512):
|
539 |
+
The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
|
540 |
+
Diffusion v2 base model. Use 768 for Stable Diffusion v2.
|
541 |
+
upcast_attention (`bool`, *optional*, defaults to `None`):
|
542 |
+
Whether the attention computation should always be upcasted.
|
543 |
+
kwargs (remaining dictionary of keyword arguments, *optional*):
|
544 |
+
Can be used to overwrite load and saveable variables (for example the pipeline components of the
|
545 |
+
specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
|
546 |
+
method. See example below for more information.
|
547 |
+
|
548 |
+
Examples:
|
549 |
+
|
550 |
+
```py
|
551 |
+
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
|
552 |
+
|
553 |
+
url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" # can also be a local path
|
554 |
+
model = ControlNetModel.from_single_file(url)
|
555 |
+
|
556 |
+
url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors" # can also be a local path
|
557 |
+
pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet)
|
558 |
+
```
|
559 |
+
"""
|
560 |
+
# import here to avoid circular dependency
|
561 |
+
from ..pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
|
562 |
+
|
563 |
+
config_file = kwargs.pop("config_file", None)
|
564 |
+
cache_dir = kwargs.pop("cache_dir", None)
|
565 |
+
resume_download = kwargs.pop("resume_download", False)
|
566 |
+
force_download = kwargs.pop("force_download", False)
|
567 |
+
proxies = kwargs.pop("proxies", None)
|
568 |
+
local_files_only = kwargs.pop("local_files_only", None)
|
569 |
+
token = kwargs.pop("token", None)
|
570 |
+
num_in_channels = kwargs.pop("num_in_channels", None)
|
571 |
+
use_linear_projection = kwargs.pop("use_linear_projection", None)
|
572 |
+
revision = kwargs.pop("revision", None)
|
573 |
+
extract_ema = kwargs.pop("extract_ema", False)
|
574 |
+
image_size = kwargs.pop("image_size", None)
|
575 |
+
upcast_attention = kwargs.pop("upcast_attention", None)
|
576 |
+
|
577 |
+
torch_dtype = kwargs.pop("torch_dtype", None)
|
578 |
+
|
579 |
+
use_safetensors = kwargs.pop("use_safetensors", None)
|
580 |
+
|
581 |
+
file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
|
582 |
+
from_safetensors = file_extension == "safetensors"
|
583 |
+
|
584 |
+
if from_safetensors and use_safetensors is False:
|
585 |
+
raise ValueError("Make sure to install `safetensors` with `pip install safetensors`.")
|
586 |
+
|
587 |
+
# remove huggingface url
|
588 |
+
for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
|
589 |
+
if pretrained_model_link_or_path.startswith(prefix):
|
590 |
+
pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
|
591 |
+
|
592 |
+
# Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
|
593 |
+
ckpt_path = Path(pretrained_model_link_or_path)
|
594 |
+
if not ckpt_path.is_file():
|
595 |
+
# get repo_id and (potentially nested) file path of ckpt in repo
|
596 |
+
repo_id = "/".join(ckpt_path.parts[:2])
|
597 |
+
file_path = "/".join(ckpt_path.parts[2:])
|
598 |
+
|
599 |
+
if file_path.startswith("blob/"):
|
600 |
+
file_path = file_path[len("blob/") :]
|
601 |
+
|
602 |
+
if file_path.startswith("main/"):
|
603 |
+
file_path = file_path[len("main/") :]
|
604 |
+
|
605 |
+
pretrained_model_link_or_path = hf_hub_download(
|
606 |
+
repo_id,
|
607 |
+
filename=file_path,
|
608 |
+
cache_dir=cache_dir,
|
609 |
+
resume_download=resume_download,
|
610 |
+
proxies=proxies,
|
611 |
+
local_files_only=local_files_only,
|
612 |
+
token=token,
|
613 |
+
revision=revision,
|
614 |
+
force_download=force_download,
|
615 |
+
)
|
616 |
+
|
617 |
+
if config_file is None:
|
618 |
+
config_url = "https://raw.githubusercontent.com/lllyasviel/ControlNet/main/models/cldm_v15.yaml"
|
619 |
+
config_file = BytesIO(requests.get(config_url).content)
|
620 |
+
|
621 |
+
image_size = image_size or 512
|
622 |
+
|
623 |
+
controlnet = download_controlnet_from_original_ckpt(
|
624 |
+
pretrained_model_link_or_path,
|
625 |
+
original_config_file=config_file,
|
626 |
+
image_size=image_size,
|
627 |
+
extract_ema=extract_ema,
|
628 |
+
num_in_channels=num_in_channels,
|
629 |
+
upcast_attention=upcast_attention,
|
630 |
+
from_safetensors=from_safetensors,
|
631 |
+
use_linear_projection=use_linear_projection,
|
632 |
+
)
|
633 |
+
|
634 |
+
if torch_dtype is not None:
|
635 |
+
controlnet.to(dtype=torch_dtype)
|
636 |
+
|
637 |
+
return controlnet
|
diffusers/loaders/textual_inversion.py
ADDED
@@ -0,0 +1,455 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from typing import Dict, List, Optional, Union
|
15 |
+
|
16 |
+
import safetensors
|
17 |
+
import torch
|
18 |
+
from huggingface_hub.utils import validate_hf_hub_args
|
19 |
+
from torch import nn
|
20 |
+
|
21 |
+
from ..utils import _get_model_file, is_accelerate_available, is_transformers_available, logging
|
22 |
+
|
23 |
+
|
24 |
+
if is_transformers_available():
|
25 |
+
from transformers import PreTrainedModel, PreTrainedTokenizer
|
26 |
+
|
27 |
+
if is_accelerate_available():
|
28 |
+
from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
|
29 |
+
|
30 |
+
logger = logging.get_logger(__name__)
|
31 |
+
|
32 |
+
TEXT_INVERSION_NAME = "learned_embeds.bin"
|
33 |
+
TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors"
|
34 |
+
|
35 |
+
|
36 |
+
@validate_hf_hub_args
|
37 |
+
def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs):
|
38 |
+
cache_dir = kwargs.pop("cache_dir", None)
|
39 |
+
force_download = kwargs.pop("force_download", False)
|
40 |
+
resume_download = kwargs.pop("resume_download", False)
|
41 |
+
proxies = kwargs.pop("proxies", None)
|
42 |
+
local_files_only = kwargs.pop("local_files_only", None)
|
43 |
+
token = kwargs.pop("token", None)
|
44 |
+
revision = kwargs.pop("revision", None)
|
45 |
+
subfolder = kwargs.pop("subfolder", None)
|
46 |
+
weight_name = kwargs.pop("weight_name", None)
|
47 |
+
use_safetensors = kwargs.pop("use_safetensors", None)
|
48 |
+
|
49 |
+
allow_pickle = False
|
50 |
+
if use_safetensors is None:
|
51 |
+
use_safetensors = True
|
52 |
+
allow_pickle = True
|
53 |
+
|
54 |
+
user_agent = {
|
55 |
+
"file_type": "text_inversion",
|
56 |
+
"framework": "pytorch",
|
57 |
+
}
|
58 |
+
state_dicts = []
|
59 |
+
for pretrained_model_name_or_path in pretrained_model_name_or_paths:
|
60 |
+
if not isinstance(pretrained_model_name_or_path, (dict, torch.Tensor)):
|
61 |
+
# 3.1. Load textual inversion file
|
62 |
+
model_file = None
|
63 |
+
|
64 |
+
# Let's first try to load .safetensors weights
|
65 |
+
if (use_safetensors and weight_name is None) or (
|
66 |
+
weight_name is not None and weight_name.endswith(".safetensors")
|
67 |
+
):
|
68 |
+
try:
|
69 |
+
model_file = _get_model_file(
|
70 |
+
pretrained_model_name_or_path,
|
71 |
+
weights_name=weight_name or TEXT_INVERSION_NAME_SAFE,
|
72 |
+
cache_dir=cache_dir,
|
73 |
+
force_download=force_download,
|
74 |
+
resume_download=resume_download,
|
75 |
+
proxies=proxies,
|
76 |
+
local_files_only=local_files_only,
|
77 |
+
token=token,
|
78 |
+
revision=revision,
|
79 |
+
subfolder=subfolder,
|
80 |
+
user_agent=user_agent,
|
81 |
+
)
|
82 |
+
state_dict = safetensors.torch.load_file(model_file, device="cpu")
|
83 |
+
except Exception as e:
|
84 |
+
if not allow_pickle:
|
85 |
+
raise e
|
86 |
+
|
87 |
+
model_file = None
|
88 |
+
|
89 |
+
if model_file is None:
|
90 |
+
model_file = _get_model_file(
|
91 |
+
pretrained_model_name_or_path,
|
92 |
+
weights_name=weight_name or TEXT_INVERSION_NAME,
|
93 |
+
cache_dir=cache_dir,
|
94 |
+
force_download=force_download,
|
95 |
+
resume_download=resume_download,
|
96 |
+
proxies=proxies,
|
97 |
+
local_files_only=local_files_only,
|
98 |
+
token=token,
|
99 |
+
revision=revision,
|
100 |
+
subfolder=subfolder,
|
101 |
+
user_agent=user_agent,
|
102 |
+
)
|
103 |
+
state_dict = torch.load(model_file, map_location="cpu")
|
104 |
+
else:
|
105 |
+
state_dict = pretrained_model_name_or_path
|
106 |
+
|
107 |
+
state_dicts.append(state_dict)
|
108 |
+
|
109 |
+
return state_dicts
|
110 |
+
|
111 |
+
|
112 |
+
class TextualInversionLoaderMixin:
|
113 |
+
r"""
|
114 |
+
Load Textual Inversion tokens and embeddings to the tokenizer and text encoder.
|
115 |
+
"""
|
116 |
+
|
117 |
+
def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"): # noqa: F821
|
118 |
+
r"""
|
119 |
+
Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
|
120 |
+
be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
|
121 |
+
inversion token or if the textual inversion token is a single vector, the input prompt is returned.
|
122 |
+
|
123 |
+
Parameters:
|
124 |
+
prompt (`str` or list of `str`):
|
125 |
+
The prompt or prompts to guide the image generation.
|
126 |
+
tokenizer (`PreTrainedTokenizer`):
|
127 |
+
The tokenizer responsible for encoding the prompt into input tokens.
|
128 |
+
|
129 |
+
Returns:
|
130 |
+
`str` or list of `str`: The converted prompt
|
131 |
+
"""
|
132 |
+
if not isinstance(prompt, List):
|
133 |
+
prompts = [prompt]
|
134 |
+
else:
|
135 |
+
prompts = prompt
|
136 |
+
|
137 |
+
prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts]
|
138 |
+
|
139 |
+
if not isinstance(prompt, List):
|
140 |
+
return prompts[0]
|
141 |
+
|
142 |
+
return prompts
|
143 |
+
|
144 |
+
def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"): # noqa: F821
|
145 |
+
r"""
|
146 |
+
Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
|
147 |
+
to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
|
148 |
+
is replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
|
149 |
+
inversion token or a textual inversion token that is a single vector, the input prompt is simply returned.
|
150 |
+
|
151 |
+
Parameters:
|
152 |
+
prompt (`str`):
|
153 |
+
The prompt to guide the image generation.
|
154 |
+
tokenizer (`PreTrainedTokenizer`):
|
155 |
+
The tokenizer responsible for encoding the prompt into input tokens.
|
156 |
+
|
157 |
+
Returns:
|
158 |
+
`str`: The converted prompt
|
159 |
+
"""
|
160 |
+
tokens = tokenizer.tokenize(prompt)
|
161 |
+
unique_tokens = set(tokens)
|
162 |
+
for token in unique_tokens:
|
163 |
+
if token in tokenizer.added_tokens_encoder:
|
164 |
+
replacement = token
|
165 |
+
i = 1
|
166 |
+
while f"{token}_{i}" in tokenizer.added_tokens_encoder:
|
167 |
+
replacement += f" {token}_{i}"
|
168 |
+
i += 1
|
169 |
+
|
170 |
+
prompt = prompt.replace(token, replacement)
|
171 |
+
|
172 |
+
return prompt
|
173 |
+
|
174 |
+
def _check_text_inv_inputs(self, tokenizer, text_encoder, pretrained_model_name_or_paths, tokens):
|
175 |
+
if tokenizer is None:
|
176 |
+
raise ValueError(
|
177 |
+
f"{self.__class__.__name__} requires `self.tokenizer` or passing a `tokenizer` of type `PreTrainedTokenizer` for calling"
|
178 |
+
f" `{self.load_textual_inversion.__name__}`"
|
179 |
+
)
|
180 |
+
|
181 |
+
if text_encoder is None:
|
182 |
+
raise ValueError(
|
183 |
+
f"{self.__class__.__name__} requires `self.text_encoder` or passing a `text_encoder` of type `PreTrainedModel` for calling"
|
184 |
+
f" `{self.load_textual_inversion.__name__}`"
|
185 |
+
)
|
186 |
+
|
187 |
+
if len(pretrained_model_name_or_paths) > 1 and len(pretrained_model_name_or_paths) != len(tokens):
|
188 |
+
raise ValueError(
|
189 |
+
f"You have passed a list of models of length {len(pretrained_model_name_or_paths)}, and list of tokens of length {len(tokens)} "
|
190 |
+
f"Make sure both lists have the same length."
|
191 |
+
)
|
192 |
+
|
193 |
+
valid_tokens = [t for t in tokens if t is not None]
|
194 |
+
if len(set(valid_tokens)) < len(valid_tokens):
|
195 |
+
raise ValueError(f"You have passed a list of tokens that contains duplicates: {tokens}")
|
196 |
+
|
197 |
+
@staticmethod
|
198 |
+
def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer):
|
199 |
+
all_tokens = []
|
200 |
+
all_embeddings = []
|
201 |
+
for state_dict, token in zip(state_dicts, tokens):
|
202 |
+
if isinstance(state_dict, torch.Tensor):
|
203 |
+
if token is None:
|
204 |
+
raise ValueError(
|
205 |
+
"You are trying to load a textual inversion embedding that has been saved as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`."
|
206 |
+
)
|
207 |
+
loaded_token = token
|
208 |
+
embedding = state_dict
|
209 |
+
elif len(state_dict) == 1:
|
210 |
+
# diffusers
|
211 |
+
loaded_token, embedding = next(iter(state_dict.items()))
|
212 |
+
elif "string_to_param" in state_dict:
|
213 |
+
# A1111
|
214 |
+
loaded_token = state_dict["name"]
|
215 |
+
embedding = state_dict["string_to_param"]["*"]
|
216 |
+
else:
|
217 |
+
raise ValueError(
|
218 |
+
f"Loaded state dictonary is incorrect: {state_dict}. \n\n"
|
219 |
+
"Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`"
|
220 |
+
" input key."
|
221 |
+
)
|
222 |
+
|
223 |
+
if token is not None and loaded_token != token:
|
224 |
+
logger.info(f"The loaded token: {loaded_token} is overwritten by the passed token {token}.")
|
225 |
+
else:
|
226 |
+
token = loaded_token
|
227 |
+
|
228 |
+
if token in tokenizer.get_vocab():
|
229 |
+
raise ValueError(
|
230 |
+
f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder."
|
231 |
+
)
|
232 |
+
|
233 |
+
all_tokens.append(token)
|
234 |
+
all_embeddings.append(embedding)
|
235 |
+
|
236 |
+
return all_tokens, all_embeddings
|
237 |
+
|
238 |
+
@staticmethod
|
239 |
+
def _extend_tokens_and_embeddings(tokens, embeddings, tokenizer):
|
240 |
+
all_tokens = []
|
241 |
+
all_embeddings = []
|
242 |
+
|
243 |
+
for embedding, token in zip(embeddings, tokens):
|
244 |
+
if f"{token}_1" in tokenizer.get_vocab():
|
245 |
+
multi_vector_tokens = [token]
|
246 |
+
i = 1
|
247 |
+
while f"{token}_{i}" in tokenizer.added_tokens_encoder:
|
248 |
+
multi_vector_tokens.append(f"{token}_{i}")
|
249 |
+
i += 1
|
250 |
+
|
251 |
+
raise ValueError(
|
252 |
+
f"Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder."
|
253 |
+
)
|
254 |
+
|
255 |
+
is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1
|
256 |
+
if is_multi_vector:
|
257 |
+
all_tokens += [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])]
|
258 |
+
all_embeddings += [e for e in embedding] # noqa: C416
|
259 |
+
else:
|
260 |
+
all_tokens += [token]
|
261 |
+
all_embeddings += [embedding[0]] if len(embedding.shape) > 1 else [embedding]
|
262 |
+
|
263 |
+
return all_tokens, all_embeddings
|
264 |
+
|
265 |
+
@validate_hf_hub_args
|
266 |
+
def load_textual_inversion(
|
267 |
+
self,
|
268 |
+
pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
|
269 |
+
token: Optional[Union[str, List[str]]] = None,
|
270 |
+
tokenizer: Optional["PreTrainedTokenizer"] = None, # noqa: F821
|
271 |
+
text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821
|
272 |
+
**kwargs,
|
273 |
+
):
|
274 |
+
r"""
|
275 |
+
Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and
|
276 |
+
Automatic1111 formats are supported).
|
277 |
+
|
278 |
+
Parameters:
|
279 |
+
pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`):
|
280 |
+
Can be either one of the following or a list of them:
|
281 |
+
|
282 |
+
- A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a
|
283 |
+
pretrained model hosted on the Hub.
|
284 |
+
- A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual
|
285 |
+
inversion weights.
|
286 |
+
- A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights.
|
287 |
+
- A [torch state
|
288 |
+
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
|
289 |
+
|
290 |
+
token (`str` or `List[str]`, *optional*):
|
291 |
+
Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a
|
292 |
+
list, then `token` must also be a list of equal length.
|
293 |
+
text_encoder ([`~transformers.CLIPTextModel`], *optional*):
|
294 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
295 |
+
If not specified, function will take self.tokenizer.
|
296 |
+
tokenizer ([`~transformers.CLIPTokenizer`], *optional*):
|
297 |
+
A `CLIPTokenizer` to tokenize text. If not specified, function will take self.tokenizer.
|
298 |
+
weight_name (`str`, *optional*):
|
299 |
+
Name of a custom weight file. This should be used when:
|
300 |
+
|
301 |
+
- The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight
|
302 |
+
name such as `text_inv.bin`.
|
303 |
+
- The saved textual inversion file is in the Automatic1111 format.
|
304 |
+
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
305 |
+
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
|
306 |
+
is not used.
|
307 |
+
force_download (`bool`, *optional*, defaults to `False`):
|
308 |
+
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
309 |
+
cached versions if they exist.
|
310 |
+
resume_download (`bool`, *optional*, defaults to `False`):
|
311 |
+
Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
|
312 |
+
incompletely downloaded files are deleted.
|
313 |
+
proxies (`Dict[str, str]`, *optional*):
|
314 |
+
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
315 |
+
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
316 |
+
local_files_only (`bool`, *optional*, defaults to `False`):
|
317 |
+
Whether to only load local model weights and configuration files or not. If set to `True`, the model
|
318 |
+
won't be downloaded from the Hub.
|
319 |
+
token (`str` or *bool*, *optional*):
|
320 |
+
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            mirror (`str`, *optional*):
                Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
                guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
                information.

        Example:

        To load a Textual Inversion embedding vector in 🤗 Diffusers format:

        ```py
        from diffusers import StableDiffusionPipeline
        import torch

        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

        pipe.load_textual_inversion("sd-concepts-library/cat-toy")

        prompt = "A <cat-toy> backpack"

        image = pipe(prompt, num_inference_steps=50).images[0]
        image.save("cat-backpack.png")
        ```

        To load a Textual Inversion embedding vector in Automatic1111 format, make sure to download the vector first
        (for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector
        locally:

        ```py
        from diffusers import StableDiffusionPipeline
        import torch

        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

        pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")

        prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."

        image = pipe(prompt, num_inference_steps=50).images[0]
        image.save("character.png")
        ```

        """
        # 1. Set correct tokenizer and text encoder
        tokenizer = tokenizer or getattr(self, "tokenizer", None)
        text_encoder = text_encoder or getattr(self, "text_encoder", None)

        # 2. Normalize inputs
        pretrained_model_name_or_paths = (
            [pretrained_model_name_or_path]
            if not isinstance(pretrained_model_name_or_path, list)
            else pretrained_model_name_or_path
        )
        tokens = [token] if not isinstance(token, list) else token
        if tokens[0] is None:
            tokens = tokens * len(pretrained_model_name_or_paths)

        # 3. Check inputs
        self._check_text_inv_inputs(tokenizer, text_encoder, pretrained_model_name_or_paths, tokens)

        # 4. Load state dicts of textual embeddings
        state_dicts = load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs)

        # 4.1 Handle the special case when state_dict is a tensor that contains n embeddings for n tokens
        if len(tokens) > 1 and len(state_dicts) == 1:
            if isinstance(state_dicts[0], torch.Tensor):
                state_dicts = list(state_dicts[0])
                if len(tokens) != len(state_dicts):
                    raise ValueError(
                        f"You have passed a state_dict contains {len(state_dicts)} embeddings, and list of tokens of length {len(tokens)} "
                        f"Make sure both have the same length."
                    )

        # 4. Retrieve tokens and embeddings
        tokens, embeddings = self._retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer)

        # 5. Extend tokens and embeddings for multi vector
        tokens, embeddings = self._extend_tokens_and_embeddings(tokens, embeddings, tokenizer)

        # 6. Make sure all embeddings have the correct size
        expected_emb_dim = text_encoder.get_input_embeddings().weight.shape[-1]
        if any(expected_emb_dim != emb.shape[-1] for emb in embeddings):
            raise ValueError(
                "Loaded embeddings are of incorrect shape. Expected each textual inversion embedding "
                "to be of shape {input_embeddings.shape[-1]}, but are {embeddings.shape[-1]} "
            )

        # 7. Now we can be sure that loading the embedding matrix works
        # < Unsafe code:

        # 7.1 Offload all hooks in case the pipeline was cpu offloaded before make sure, we offload and onload again
        is_model_cpu_offload = False
        is_sequential_cpu_offload = False
        for _, component in self.components.items():
            if isinstance(component, nn.Module):
                if hasattr(component, "_hf_hook"):
                    is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
                    is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
                    logger.info(
                        "Accelerate hooks detected. Since you have called `load_textual_inversion()`, the previous hooks will be first removed. Then the textual inversion parameters will be loaded and the hooks will be applied again."
                    )
                    remove_hook_from_module(component, recurse=is_sequential_cpu_offload)

        # 7.2 save expected device and dtype
        device = text_encoder.device
        dtype = text_encoder.dtype

        # 7.3 Increase token embedding matrix
        text_encoder.resize_token_embeddings(len(tokenizer) + len(tokens))
        input_embeddings = text_encoder.get_input_embeddings().weight

        # 7.4 Load token and embedding
        for token, embedding in zip(tokens, embeddings):
            # add tokens and get ids
            tokenizer.add_tokens(token)
            token_id = tokenizer.convert_tokens_to_ids(token)
            input_embeddings.data[token_id] = embedding
            logger.info(f"Loaded textual inversion embedding for {token}.")

        input_embeddings.to(dtype=dtype, device=device)

        # 7.5 Offload the model again
        if is_model_cpu_offload:
            self.enable_model_cpu_offload()
        elif is_sequential_cpu_offload:
            self.enable_sequential_cpu_offload()

        # / Unsafe Code >
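The steps above come down to growing the tokenizer and the text encoder's embedding matrix, then writing each learned vector at its new token id. A minimal standalone sketch of that core operation, outside the pipeline mixin (the `learned_embedding` tensor and the `<cat-toy>` placeholder token are illustrative assumptions, not part of this file):

```py
import torch
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder")

# hypothetical learned embedding with the same width as the text encoder (768 for SD 1.5)
learned_embedding = torch.randn(text_encoder.get_input_embeddings().weight.shape[-1])

# add the placeholder token and make room for it in the embedding matrix
tokenizer.add_tokens("<cat-toy>")
text_encoder.resize_token_embeddings(len(tokenizer))

# write the learned vector at the new token id
token_id = tokenizer.convert_tokens_to_ids("<cat-toy>")
text_encoder.get_input_embeddings().weight.data[token_id] = learned_embedding
```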
diffusers/loaders/unet.py
ADDED
@@ -0,0 +1,828 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
from collections import defaultdict
from contextlib import nullcontext
from functools import partial
from typing import Callable, Dict, List, Optional, Union

import safetensors
import torch
import torch.nn.functional as F
from huggingface_hub.utils import validate_hf_hub_args
from torch import nn

from ..models.embeddings import ImageProjection, IPAdapterFullImageProjection, IPAdapterPlusImageProjection
from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
from ..utils import (
    USE_PEFT_BACKEND,
    _get_model_file,
    delete_adapter_layers,
    is_accelerate_available,
    logging,
    set_adapter_layers,
    set_weights_and_activate_adapters,
)
from .utils import AttnProcsLayers


if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module

logger = logging.get_logger(__name__)


TEXT_ENCODER_NAME = "text_encoder"
UNET_NAME = "unet"

LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"

CUSTOM_DIFFUSION_WEIGHT_NAME = "pytorch_custom_diffusion_weights.bin"
CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = "pytorch_custom_diffusion_weights.safetensors"


class UNet2DConditionLoadersMixin:
    """
    Load LoRA layers into a [`UNet2DCondtionModel`].
    """

    text_encoder_name = TEXT_ENCODER_NAME
    unet_name = UNET_NAME

    @validate_hf_hub_args
    def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
        r"""
        Load pretrained attention processor layers into [`UNet2DConditionModel`]. Attention processor layers have to be
        defined in
        [`attention_processor.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py)
        and be a `torch.nn.Module` class.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the model id (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a directory (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading only loading the pretrained weights and not initializing the weights. This also
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            mirror (`str`, *optional*):
                Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
                guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
                information.

        Example:

        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.unet.load_attn_procs(
            "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
        )
        ```
        """
        from ..models.attention_processor import CustomDiffusionAttnProcessor
        from ..models.lora import LoRACompatibleConv, LoRACompatibleLinear, LoRAConv2dLayer, LoRALinearLayer

        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", None)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
        weight_name = kwargs.pop("weight_name", None)
        use_safetensors = kwargs.pop("use_safetensors", None)
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
        # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
        # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        network_alphas = kwargs.pop("network_alphas", None)

        _pipeline = kwargs.pop("_pipeline", None)

        is_network_alphas_none = network_alphas is None

        allow_pickle = False

        if use_safetensors is None:
            use_safetensors = True
            allow_pickle = True

        user_agent = {
            "file_type": "attn_procs_weights",
            "framework": "pytorch",
        }

        if low_cpu_mem_usage and not is_accelerate_available():
            low_cpu_mem_usage = False
            logger.warning(
                "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
                " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
                " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
                " install accelerate\n```\n."
            )

        model_file = None
        if not isinstance(pretrained_model_name_or_path_or_dict, dict):
            # Let's first try to load .safetensors weights
            if (use_safetensors and weight_name is None) or (
                weight_name is not None and weight_name.endswith(".safetensors")
            ):
                try:
                    model_file = _get_model_file(
                        pretrained_model_name_or_path_or_dict,
                        weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        resume_download=resume_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        token=token,
                        revision=revision,
                        subfolder=subfolder,
                        user_agent=user_agent,
                    )
                    state_dict = safetensors.torch.load_file(model_file, device="cpu")
                except IOError as e:
                    if not allow_pickle:
                        raise e
                    # try loading non-safetensors weights
                    pass
            if model_file is None:
                model_file = _get_model_file(
                    pretrained_model_name_or_path_or_dict,
                    weights_name=weight_name or LORA_WEIGHT_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                state_dict = torch.load(model_file, map_location="cpu")
        else:
            state_dict = pretrained_model_name_or_path_or_dict

        # fill attn processors
        lora_layers_list = []

        is_lora = all(("lora" in k or k.endswith(".alpha")) for k in state_dict.keys()) and not USE_PEFT_BACKEND
        is_custom_diffusion = any("custom_diffusion" in k for k in state_dict.keys())

        if is_lora:
            # correct keys
            state_dict, network_alphas = self.convert_state_dict_legacy_attn_format(state_dict, network_alphas)

            if network_alphas is not None:
                network_alphas_keys = list(network_alphas.keys())
                used_network_alphas_keys = set()

            lora_grouped_dict = defaultdict(dict)
            mapped_network_alphas = {}

            all_keys = list(state_dict.keys())
            for key in all_keys:
                value = state_dict.pop(key)
                attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
                lora_grouped_dict[attn_processor_key][sub_key] = value

                # Create another `mapped_network_alphas` dictionary so that we can properly map them.
                if network_alphas is not None:
                    for k in network_alphas_keys:
                        if k.replace(".alpha", "") in key:
                            mapped_network_alphas.update({attn_processor_key: network_alphas.get(k)})
                            used_network_alphas_keys.add(k)

            if not is_network_alphas_none:
                if len(set(network_alphas_keys) - used_network_alphas_keys) > 0:
                    raise ValueError(
                        f"The `network_alphas` has to be empty at this point but has the following keys \n\n {', '.join(network_alphas.keys())}"
                    )

            if len(state_dict) > 0:
                raise ValueError(
                    f"The `state_dict` has to be empty at this point but has the following keys \n\n {', '.join(state_dict.keys())}"
                )

            for key, value_dict in lora_grouped_dict.items():
                attn_processor = self
                for sub_key in key.split("."):
                    attn_processor = getattr(attn_processor, sub_key)

                # Process non-attention layers, which don't have to_{k,v,q,out_proj}_lora layers
                # or add_{k,v,q,out_proj}_proj_lora layers.
                rank = value_dict["lora.down.weight"].shape[0]

                if isinstance(attn_processor, LoRACompatibleConv):
                    in_features = attn_processor.in_channels
                    out_features = attn_processor.out_channels
                    kernel_size = attn_processor.kernel_size

                    ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
                    with ctx():
                        lora = LoRAConv2dLayer(
                            in_features=in_features,
                            out_features=out_features,
                            rank=rank,
                            kernel_size=kernel_size,
                            stride=attn_processor.stride,
                            padding=attn_processor.padding,
                            network_alpha=mapped_network_alphas.get(key),
                        )
                elif isinstance(attn_processor, LoRACompatibleLinear):
                    ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
                    with ctx():
                        lora = LoRALinearLayer(
                            attn_processor.in_features,
                            attn_processor.out_features,
                            rank,
                            mapped_network_alphas.get(key),
                        )
                else:
                    raise ValueError(f"Module {key} is not a LoRACompatibleConv or LoRACompatibleLinear module.")

                value_dict = {k.replace("lora.", ""): v for k, v in value_dict.items()}
                lora_layers_list.append((attn_processor, lora))

                if low_cpu_mem_usage:
                    device = next(iter(value_dict.values())).device
                    dtype = next(iter(value_dict.values())).dtype
                    load_model_dict_into_meta(lora, value_dict, device=device, dtype=dtype)
                else:
                    lora.load_state_dict(value_dict)

        elif is_custom_diffusion:
            attn_processors = {}
            custom_diffusion_grouped_dict = defaultdict(dict)
            for key, value in state_dict.items():
                if len(value) == 0:
                    custom_diffusion_grouped_dict[key] = {}
                else:
                    if "to_out" in key:
                        attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
                    else:
                        attn_processor_key, sub_key = ".".join(key.split(".")[:-2]), ".".join(key.split(".")[-2:])
                    custom_diffusion_grouped_dict[attn_processor_key][sub_key] = value

            for key, value_dict in custom_diffusion_grouped_dict.items():
                if len(value_dict) == 0:
                    attn_processors[key] = CustomDiffusionAttnProcessor(
                        train_kv=False, train_q_out=False, hidden_size=None, cross_attention_dim=None
                    )
                else:
                    cross_attention_dim = value_dict["to_k_custom_diffusion.weight"].shape[1]
                    hidden_size = value_dict["to_k_custom_diffusion.weight"].shape[0]
                    train_q_out = True if "to_q_custom_diffusion.weight" in value_dict else False
                    attn_processors[key] = CustomDiffusionAttnProcessor(
                        train_kv=True,
                        train_q_out=train_q_out,
                        hidden_size=hidden_size,
                        cross_attention_dim=cross_attention_dim,
                    )
                    attn_processors[key].load_state_dict(value_dict)
        elif USE_PEFT_BACKEND:
            # In that case we have nothing to do as loading the adapter weights is already handled above by `set_peft_model_state_dict`
            # on the Unet
            pass
        else:
            raise ValueError(
                f"{model_file} does not seem to be in the correct format expected by LoRA or Custom Diffusion training."
            )

        # <Unsafe code
        # We can be sure that the following works as it just sets attention processors, lora layers and puts all in the same dtype
        # Now we remove any existing hooks to
        is_model_cpu_offload = False
        is_sequential_cpu_offload = False

        # For PEFT backend the Unet is already offloaded at this stage as it is handled inside `lora_lora_weights_into_unet`
        if not USE_PEFT_BACKEND:
            if _pipeline is not None:
                for _, component in _pipeline.components.items():
                    if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"):
                        is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
                        is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)

                        logger.info(
                            "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
                        )
                        remove_hook_from_module(component, recurse=is_sequential_cpu_offload)

            # only custom diffusion needs to set attn processors
            if is_custom_diffusion:
                self.set_attn_processor(attn_processors)

            # set lora layers
            for target_module, lora_layer in lora_layers_list:
                target_module.set_lora_layer(lora_layer)

            self.to(dtype=self.dtype, device=self.device)

        # Offload back.
        if is_model_cpu_offload:
            _pipeline.enable_model_cpu_offload()
        elif is_sequential_cpu_offload:
            _pipeline.enable_sequential_cpu_offload()
        # Unsafe code />

    def convert_state_dict_legacy_attn_format(self, state_dict, network_alphas):
        is_new_lora_format = all(
            key.startswith(self.unet_name) or key.startswith(self.text_encoder_name) for key in state_dict.keys()
        )
        if is_new_lora_format:
            # Strip the `"unet"` prefix.
            is_text_encoder_present = any(key.startswith(self.text_encoder_name) for key in state_dict.keys())
            if is_text_encoder_present:
                warn_message = "The state_dict contains LoRA params corresponding to the text encoder which are not being used here. To use both UNet and text encoder related LoRA params, use [`pipe.load_lora_weights()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.load_lora_weights)."
                logger.warn(warn_message)
            unet_keys = [k for k in state_dict.keys() if k.startswith(self.unet_name)]
            state_dict = {k.replace(f"{self.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}

        # change processor format to 'pure' LoRACompatibleLinear format
        if any("processor" in k.split(".") for k in state_dict.keys()):

            def format_to_lora_compatible(key):
                if "processor" not in key.split("."):
                    return key
                return key.replace(".processor", "").replace("to_out_lora", "to_out.0.lora").replace("_lora", ".lora")

            state_dict = {format_to_lora_compatible(k): v for k, v in state_dict.items()}

            if network_alphas is not None:
                network_alphas = {format_to_lora_compatible(k): v for k, v in network_alphas.items()}
        return state_dict, network_alphas

    def save_attn_procs(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        weight_name: str = None,
        save_function: Callable = None,
        safe_serialization: bool = True,
        **kwargs,
    ):
        r"""
        Save attention processor layers to a directory so that it can be reloaded with the
        [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save an attention processor to (will be created if it doesn't exist).
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or with `pickle`.

        Example:

        ```py
        import torch
        from diffusers import DiffusionPipeline

        pipeline = DiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            torch_dtype=torch.float16,
        ).to("cuda")
        pipeline.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
        pipeline.unet.save_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
        ```
        """
        from ..models.attention_processor import (
            CustomDiffusionAttnProcessor,
            CustomDiffusionAttnProcessor2_0,
            CustomDiffusionXFormersAttnProcessor,
        )

        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if save_function is None:
            if safe_serialization:

                def save_function(weights, filename):
                    return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})

            else:
                save_function = torch.save

        os.makedirs(save_directory, exist_ok=True)

        is_custom_diffusion = any(
            isinstance(
                x,
                (CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor),
            )
            for (_, x) in self.attn_processors.items()
        )
        if is_custom_diffusion:
            model_to_save = AttnProcsLayers(
                {
                    y: x
                    for (y, x) in self.attn_processors.items()
                    if isinstance(
                        x,
                        (
                            CustomDiffusionAttnProcessor,
                            CustomDiffusionAttnProcessor2_0,
                            CustomDiffusionXFormersAttnProcessor,
                        ),
                    )
                }
            )
            state_dict = model_to_save.state_dict()
            for name, attn in self.attn_processors.items():
                if len(attn.state_dict()) == 0:
                    state_dict[name] = {}
        else:
            model_to_save = AttnProcsLayers(self.attn_processors)
            state_dict = model_to_save.state_dict()

        if weight_name is None:
            if safe_serialization:
                weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE if is_custom_diffusion else LORA_WEIGHT_NAME_SAFE
            else:
                weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME if is_custom_diffusion else LORA_WEIGHT_NAME

        # Save the model
        save_function(state_dict, os.path.join(save_directory, weight_name))
        logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")

    def fuse_lora(self, lora_scale=1.0, safe_fusing=False, adapter_names=None):
        self.lora_scale = lora_scale
        self._safe_fusing = safe_fusing
        self.apply(partial(self._fuse_lora_apply, adapter_names=adapter_names))

    def _fuse_lora_apply(self, module, adapter_names=None):
        if not USE_PEFT_BACKEND:
            if hasattr(module, "_fuse_lora"):
                module._fuse_lora(self.lora_scale, self._safe_fusing)

            if adapter_names is not None:
                raise ValueError(
                    "The `adapter_names` argument is not supported in your environment. Please switch"
                    " to PEFT backend to use this argument by installing latest PEFT and transformers."
                    " `pip install -U peft transformers`"
                )
        else:
            from peft.tuners.tuners_utils import BaseTunerLayer

            merge_kwargs = {"safe_merge": self._safe_fusing}

            if isinstance(module, BaseTunerLayer):
                if self.lora_scale != 1.0:
                    module.scale_layer(self.lora_scale)

                # For BC with prevous PEFT versions, we need to check the signature
                # of the `merge` method to see if it supports the `adapter_names` argument.
                supported_merge_kwargs = list(inspect.signature(module.merge).parameters)
                if "adapter_names" in supported_merge_kwargs:
                    merge_kwargs["adapter_names"] = adapter_names
                elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None:
                    raise ValueError(
                        "The `adapter_names` argument is not supported with your PEFT version. Please upgrade"
                        " to the latest version of PEFT. `pip install -U peft`"
                    )

                module.merge(**merge_kwargs)

    def unfuse_lora(self):
        self.apply(self._unfuse_lora_apply)

    def _unfuse_lora_apply(self, module):
        if not USE_PEFT_BACKEND:
            if hasattr(module, "_unfuse_lora"):
                module._unfuse_lora()
        else:
            from peft.tuners.tuners_utils import BaseTunerLayer

            if isinstance(module, BaseTunerLayer):
                module.unmerge()

    def set_adapters(
        self,
        adapter_names: Union[List[str], str],
        weights: Optional[Union[List[float], float]] = None,
    ):
        """
        Set the currently active adapters for use in the UNet.

        Args:
            adapter_names (`List[str]` or `str`):
                The names of the adapters to use.
            adapter_weights (`Union[List[float], float]`, *optional*):
                The adapter(s) weights to use with the UNet. If `None`, the weights are set to `1.0` for all the
                adapters.

        Example:

        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights(
            "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
        )
        pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
        pipeline.set_adapters(["cinematic", "pixel"], adapter_weights=[0.5, 0.5])
        ```
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for `set_adapters()`.")

        adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names

        if weights is None:
            weights = [1.0] * len(adapter_names)
        elif isinstance(weights, float):
            weights = [weights] * len(adapter_names)

        if len(adapter_names) != len(weights):
            raise ValueError(
                f"Length of adapter names {len(adapter_names)} is not equal to the length of their weights {len(weights)}."
            )

        set_weights_and_activate_adapters(self, adapter_names, weights)

    def disable_lora(self):
        """
        Disable the UNet's active LoRA layers.

        Example:

        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights(
            "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
        )
        pipeline.disable_lora()
        ```
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")
        set_adapter_layers(self, enabled=False)

    def enable_lora(self):
        """
        Enable the UNet's active LoRA layers.

        Example:

        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights(
            "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
        )
        pipeline.enable_lora()
        ```
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")
        set_adapter_layers(self, enabled=True)

    def delete_adapters(self, adapter_names: Union[List[str], str]):
        """
        Delete an adapter's LoRA layers from the UNet.

        Args:
            adapter_names (`Union[List[str], str]`):
                The names (single string or list of strings) of the adapter to delete.

        Example:

        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights(
            "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_names="cinematic"
        )
        pipeline.delete_adapters("cinematic")
        ```
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")

        if isinstance(adapter_names, str):
            adapter_names = [adapter_names]

        for adapter_name in adapter_names:
            delete_adapter_layers(self, adapter_name)

            # Pop also the corresponding adapter from the config
            if hasattr(self, "peft_config"):
                self.peft_config.pop(adapter_name, None)

    def _convert_ip_adapter_image_proj_to_diffusers(self, state_dict):
        updated_state_dict = {}
        image_projection = None

        if "proj.weight" in state_dict:
            # IP-Adapter
            num_image_text_embeds = 4
            clip_embeddings_dim = state_dict["proj.weight"].shape[-1]
            cross_attention_dim = state_dict["proj.weight"].shape[0] // 4

            image_projection = ImageProjection(
                cross_attention_dim=cross_attention_dim,
                image_embed_dim=clip_embeddings_dim,
                num_image_text_embeds=num_image_text_embeds,
            )

            for key, value in state_dict.items():
                diffusers_name = key.replace("proj", "image_embeds")
                updated_state_dict[diffusers_name] = value

        elif "proj.3.weight" in state_dict:
            # IP-Adapter Full
            clip_embeddings_dim = state_dict["proj.0.weight"].shape[0]
            cross_attention_dim = state_dict["proj.3.weight"].shape[0]

            image_projection = IPAdapterFullImageProjection(
                cross_attention_dim=cross_attention_dim, image_embed_dim=clip_embeddings_dim
            )

            for key, value in state_dict.items():
                diffusers_name = key.replace("proj.0", "ff.net.0.proj")
                diffusers_name = diffusers_name.replace("proj.2", "ff.net.2")
                diffusers_name = diffusers_name.replace("proj.3", "norm")
                updated_state_dict[diffusers_name] = value

        else:
            # IP-Adapter Plus
            num_image_text_embeds = state_dict["latents"].shape[1]
            embed_dims = state_dict["proj_in.weight"].shape[1]
            output_dims = state_dict["proj_out.weight"].shape[0]
            hidden_dims = state_dict["latents"].shape[2]
            heads = state_dict["layers.0.0.to_q.weight"].shape[0] // 64

            image_projection = IPAdapterPlusImageProjection(
                embed_dims=embed_dims,
                output_dims=output_dims,
                hidden_dims=hidden_dims,
                heads=heads,
                num_queries=num_image_text_embeds,
            )

            for key, value in state_dict.items():
                diffusers_name = key.replace("0.to", "2.to")
                diffusers_name = diffusers_name.replace("1.0.weight", "3.0.weight")
                diffusers_name = diffusers_name.replace("1.0.bias", "3.0.bias")
                diffusers_name = diffusers_name.replace("1.1.weight", "3.1.net.0.proj.weight")
                diffusers_name = diffusers_name.replace("1.3.weight", "3.1.net.2.weight")

                if "norm1" in diffusers_name:
                    updated_state_dict[diffusers_name.replace("0.norm1", "0")] = value
                elif "norm2" in diffusers_name:
                    updated_state_dict[diffusers_name.replace("0.norm2", "1")] = value
                elif "to_kv" in diffusers_name:
                    v_chunk = value.chunk(2, dim=0)
                    updated_state_dict[diffusers_name.replace("to_kv", "to_k")] = v_chunk[0]
                    updated_state_dict[diffusers_name.replace("to_kv", "to_v")] = v_chunk[1]
                elif "to_out" in diffusers_name:
                    updated_state_dict[diffusers_name.replace("to_out", "to_out.0")] = value
                else:
                    updated_state_dict[diffusers_name] = value

        image_projection.load_state_dict(updated_state_dict)
        return image_projection

    def _load_ip_adapter_weights(self, state_dict):
        from ..models.attention_processor import (
            AttnProcessor,
            AttnProcessor2_0,
            IPAdapterAttnProcessor,
            IPAdapterAttnProcessor2_0,
        )

        if "proj.weight" in state_dict["image_proj"]:
            # IP-Adapter
            num_image_text_embeds = 4
        elif "proj.3.weight" in state_dict["image_proj"]:
            # IP-Adapter Full Face
            num_image_text_embeds = 257  # 256 CLIP tokens + 1 CLS token
        else:
            # IP-Adapter Plus
            num_image_text_embeds = state_dict["image_proj"]["latents"].shape[1]

        # Set encoder_hid_proj after loading ip_adapter weights,
        # because `IPAdapterPlusImageProjection` also has `attn_processors`.
        self.encoder_hid_proj = None

        # set ip-adapter cross-attention processors & load state_dict
        attn_procs = {}
        key_id = 1
        for name in self.attn_processors.keys():
            cross_attention_dim = None if name.endswith("attn1.processor") else self.config.cross_attention_dim
            if name.startswith("mid_block"):
                hidden_size = self.config.block_out_channels[-1]
            elif name.startswith("up_blocks"):
                block_id = int(name[len("up_blocks.")])
                hidden_size = list(reversed(self.config.block_out_channels))[block_id]
            elif name.startswith("down_blocks"):
                block_id = int(name[len("down_blocks.")])
                hidden_size = self.config.block_out_channels[block_id]
            if cross_attention_dim is None or "motion_modules" in name:
                attn_processor_class = (
                    AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor
                )
                attn_procs[name] = attn_processor_class()
            else:
                attn_processor_class = (
                    IPAdapterAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else IPAdapterAttnProcessor
                )
                attn_procs[name] = attn_processor_class(
                    hidden_size=hidden_size,
                    cross_attention_dim=cross_attention_dim,
                    scale=1.0,
                    num_tokens=num_image_text_embeds,
                ).to(dtype=self.dtype, device=self.device)

                value_dict = {}
                for k, w in attn_procs[name].state_dict().items():
                    value_dict.update({f"{k}": state_dict["ip_adapter"][f"{key_id}.{k}"]})

                attn_procs[name].load_state_dict(value_dict)
                key_id += 2

        self.set_attn_processor(attn_procs)

        # convert IP-Adapter Image Projection layers to diffusers
        image_projection = self._convert_ip_adapter_image_proj_to_diffusers(state_dict["image_proj"])

        self.encoder_hid_proj = image_projection.to(device=self.device, dtype=self.dtype)
        self.config.encoder_hid_dim_type = "ip_image_proj"
diffusers/loaders/utils.py
ADDED
@@ -0,0 +1,59 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Dict

import torch


class AttnProcsLayers(torch.nn.Module):
    def __init__(self, state_dict: Dict[str, torch.Tensor]):
        super().__init__()
        self.layers = torch.nn.ModuleList(state_dict.values())
        self.mapping = dict(enumerate(state_dict.keys()))
        self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}

        # .processor for unet, .self_attn for text encoder
        self.split_keys = [".processor", ".self_attn"]

        # we add a hook to state_dict() and load_state_dict() so that the
        # naming fits with `unet.attn_processors`
        def map_to(module, state_dict, *args, **kwargs):
            new_state_dict = {}
            for key, value in state_dict.items():
                num = int(key.split(".")[1])  # 0 is always "layers"
                new_key = key.replace(f"layers.{num}", module.mapping[num])
                new_state_dict[new_key] = value

            return new_state_dict

        def remap_key(key, state_dict):
            for k in self.split_keys:
                if k in key:
                    return key.split(k)[0] + k

            raise ValueError(
                f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}."
            )

        def map_from(module, state_dict, *args, **kwargs):
            all_keys = list(state_dict.keys())
            for key in all_keys:
                replace_key = remap_key(key, state_dict)
                new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
                state_dict[new_key] = state_dict[key]
                del state_dict[key]

        self._register_state_dict_hook(map_to)
        self._register_load_state_dict_pre_hook(map_from, with_module=True)
diffusers/models/README.md
ADDED
@@ -0,0 +1,3 @@
# Models

For more detail on the models, please refer to the [docs](https://huggingface.co/docs/diffusers/api/models/overview).
diffusers/models/__init__.py
ADDED
@@ -0,0 +1,94 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ..utils import (
    DIFFUSERS_SLOW_IMPORT,
    _LazyModule,
    is_flax_available,
    is_torch_available,
)


_import_structure = {}

if is_torch_available():
    _import_structure["adapter"] = ["MultiAdapter", "T2IAdapter"]
    _import_structure["autoencoders.autoencoder_asym_kl"] = ["AsymmetricAutoencoderKL"]
    _import_structure["autoencoders.autoencoder_kl"] = ["AutoencoderKL"]
    _import_structure["autoencoders.autoencoder_kl_temporal_decoder"] = ["AutoencoderKLTemporalDecoder"]
    _import_structure["autoencoders.autoencoder_tiny"] = ["AutoencoderTiny"]
    _import_structure["autoencoders.consistency_decoder_vae"] = ["ConsistencyDecoderVAE"]
    _import_structure["controlnet"] = ["ControlNetModel"]
    _import_structure["dual_transformer_2d"] = ["DualTransformer2DModel"]
    _import_structure["embeddings"] = ["ImageProjection"]
    _import_structure["modeling_utils"] = ["ModelMixin"]
    _import_structure["prior_transformer"] = ["PriorTransformer"]
    _import_structure["t5_film_transformer"] = ["T5FilmDecoder"]
    _import_structure["transformer_2d"] = ["Transformer2DModel"]
    _import_structure["transformer_temporal"] = ["TransformerTemporalModel"]
    _import_structure["unet_1d"] = ["UNet1DModel"]
    _import_structure["unet_2d"] = ["UNet2DModel"]
    _import_structure["unet_2d_condition"] = ["UNet2DConditionModel"]
    _import_structure["unet_3d_condition"] = ["UNet3DConditionModel"]
    _import_structure["unet_kandinsky3"] = ["Kandinsky3UNet"]
    _import_structure["unet_motion_model"] = ["MotionAdapter", "UNetMotionModel"]
    _import_structure["unet_spatio_temporal_condition"] = ["UNetSpatioTemporalConditionModel"]
    _import_structure["uvit_2d"] = ["UVit2DModel"]
    _import_structure["vq_model"] = ["VQModel"]

if is_flax_available():
    _import_structure["controlnet_flax"] = ["FlaxControlNetModel"]
    _import_structure["unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"]
    _import_structure["vae_flax"] = ["FlaxAutoencoderKL"]


if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    if is_torch_available():
        from .adapter import MultiAdapter, T2IAdapter
        from .autoencoders import (
            AsymmetricAutoencoderKL,
            AutoencoderKL,
            AutoencoderKLTemporalDecoder,
            AutoencoderTiny,
            ConsistencyDecoderVAE,
        )
        from .controlnet import ControlNetModel
        from .dual_transformer_2d import DualTransformer2DModel
        from .embeddings import ImageProjection
        from .modeling_utils import ModelMixin
        from .prior_transformer import PriorTransformer
        from .t5_film_transformer import T5FilmDecoder
        from .transformer_2d import Transformer2DModel
        from .transformer_temporal import TransformerTemporalModel
        from .unet_1d import UNet1DModel
        from .unet_2d import UNet2DModel
        from .unet_2d_condition import UNet2DConditionModel
        from .unet_3d_condition import UNet3DConditionModel
        from .unet_kandinsky3 import Kandinsky3UNet
        from .unet_motion_model import MotionAdapter, UNetMotionModel
        from .unet_spatio_temporal_condition import UNetSpatioTemporalConditionModel
        from .uvit_2d import UVit2DModel
        from .vq_model import VQModel

    if is_flax_available():
        from .controlnet_flax import FlaxControlNetModel
        from .unet_2d_condition_flax import FlaxUNet2DConditionModel
        from .vae_flax import FlaxAutoencoderKL

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
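The `_import_structure` dict plus the `_LazyModule` replacement of `sys.modules[__name__]` means the heavy submodules are only imported when one of their names is actually accessed. A rough illustration of what that looks like from user code (assuming the standard `diffusers` package layout; exact import timing depends on `DIFFUSERS_SLOW_IMPORT`):

```py
# The module object here is a lazy proxy; controlnet.py has not been imported yet.
import diffusers.models

# Attribute access triggers the real import of diffusers/models/controlnet.py.
ControlNetModel = diffusers.models.ControlNetModel

# Equivalent, and what most user code does:
from diffusers.models import UNet2DConditionModel

print(ControlNetModel.__name__, UNet2DConditionModel.__name__)
```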
diffusers/models/activations.py
ADDED
@@ -0,0 +1,123 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 HuggingFace Inc.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
import torch
|
17 |
+
import torch.nn.functional as F
|
18 |
+
from torch import nn
|
19 |
+
|
20 |
+
from ..utils import USE_PEFT_BACKEND
|
21 |
+
from .lora import LoRACompatibleLinear
|
22 |
+
|
23 |
+
|
24 |
+
ACTIVATION_FUNCTIONS = {
|
25 |
+
"swish": nn.SiLU(),
|
26 |
+
"silu": nn.SiLU(),
|
27 |
+
"mish": nn.Mish(),
|
28 |
+
"gelu": nn.GELU(),
|
29 |
+
"relu": nn.ReLU(),
|
30 |
+
}
|
31 |
+
|
32 |
def get_activation(act_fn: str) -> nn.Module:
    """Helper function to get activation function from string.

    Args:
        act_fn (str): Name of activation function.

    Returns:
        nn.Module: Activation function.
    """

    act_fn = act_fn.lower()
    if act_fn in ACTIVATION_FUNCTIONS:
        return ACTIVATION_FUNCTIONS[act_fn]
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")


class GELU(nn.Module):
    r"""
    GELU activation function with tanh approximation support with `approximate="tanh"`.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out, bias=bias)
        self.approximate = approximate

    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    r"""
    A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
        super().__init__()
        linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear

        self.proj = linear_cls(dim_in, dim_out * 2, bias=bias)

    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states, scale: float = 1.0):
        args = () if USE_PEFT_BACKEND else (scale,)
        hidden_states, gate = self.proj(hidden_states, *args).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    r"""
    The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this
    [paper](https://arxiv.org/abs/1606.08415).

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(self, dim_in: int, dim_out: int, bias: bool = True):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out, bias=bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
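
For orientation, a minimal usage sketch of the helpers added above; the import path assumes the vendored module resolves as `diffusers.models.activations`, and the tensor shapes are illustrative only:

import torch

from diffusers.models.activations import GEGLU, get_activation  # assumed vendored import path

# get_activation maps a config string such as "silu" or "gelu" to an nn.Module instance.
act = get_activation("silu")
print(act(torch.randn(2, 4)).shape)  # torch.Size([2, 4])

# GEGLU projects to 2 * dim_out and gates one half with GELU of the other half,
# so a (1, 77, 320) input becomes (1, 77, 1280) for dim_out=1280.
geglu = GEGLU(dim_in=320, dim_out=1280)
print(geglu(torch.randn(1, 77, 320)).shape)  # torch.Size([1, 77, 1280])
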
diffusers/models/adapter.py
ADDED
@@ -0,0 +1,584 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Callable, List, Optional, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import logging
from .modeling_utils import ModelMixin


logger = logging.get_logger(__name__)


class MultiAdapter(ModelMixin):
    r"""
    MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to
    user-assigned weighting.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
    implements for all the model (such as downloading or saving, etc.)

    Parameters:
        adapters (`List[T2IAdapter]`, *optional*, defaults to None):
            A list of `T2IAdapter` model instances.
    """

    def __init__(self, adapters: List["T2IAdapter"]):
        super(MultiAdapter, self).__init__()

        self.num_adapter = len(adapters)
        self.adapters = nn.ModuleList(adapters)

        if len(adapters) == 0:
            raise ValueError("Expecting at least one adapter")

        if len(adapters) == 1:
            raise ValueError("For a single adapter, please use the `T2IAdapter` class instead of `MultiAdapter`")

        # The outputs from each adapter are added together with a weight.
        # This means that the change in dimensions from downsampling must
        # be the same for all adapters. Inductively, it also means the
        # downscale_factor and total_downscale_factor must be the same for all
        # adapters.
        first_adapter_total_downscale_factor = adapters[0].total_downscale_factor
        first_adapter_downscale_factor = adapters[0].downscale_factor
        for idx in range(1, len(adapters)):
            if (
                adapters[idx].total_downscale_factor != first_adapter_total_downscale_factor
                or adapters[idx].downscale_factor != first_adapter_downscale_factor
            ):
                raise ValueError(
                    f"Expecting all adapters to have the same downscaling behavior, but got:\n"
                    f"adapters[0].total_downscale_factor={first_adapter_total_downscale_factor}\n"
                    f"adapters[0].downscale_factor={first_adapter_downscale_factor}\n"
                    f"adapter[`{idx}`].total_downscale_factor={adapters[idx].total_downscale_factor}\n"
                    f"adapter[`{idx}`].downscale_factor={adapters[idx].downscale_factor}"
                )

        self.total_downscale_factor = first_adapter_total_downscale_factor
        self.downscale_factor = first_adapter_downscale_factor

    def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]] = None) -> List[torch.Tensor]:
        r"""
        Args:
            xs (`torch.Tensor`):
                (batch, channel, height, width) input images for multiple adapter models concated along dimension 1,
                `channel` should equal to `num_adapter` * "number of channel of image".
            adapter_weights (`List[float]`, *optional*, defaults to None):
                List of floats representing the weight which will be multiply to each adapter's output before adding
                them together.
        """
        if adapter_weights is None:
            adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter)
        else:
            adapter_weights = torch.tensor(adapter_weights)

        accume_state = None
        for x, w, adapter in zip(xs, adapter_weights, self.adapters):
            features = adapter(x)
            if accume_state is None:
                accume_state = features
                for i in range(len(accume_state)):
                    accume_state[i] = w * accume_state[i]
            else:
                for i in range(len(features)):
                    accume_state[i] += w * features[i]
        return accume_state

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = True,
        variant: Optional[str] = None,
    ):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        `[`~models.adapter.MultiAdapter.from_pretrained`]` class method.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful when in distributed training like
                TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
                the main process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful on distributed training like TPUs when one
                need to replace `torch.save` by another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
            variant (`str`, *optional*):
                If specified, weights are saved in the format pytorch_model.<variant>.bin.
        """
        idx = 0
        model_path_to_save = save_directory
        for adapter in self.adapters:
            adapter.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        r"""
        Instantiate a pretrained MultiAdapter model from multiple pre-trained adapter models.

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
        the model, you should first set it back in training mode with `model.train()`.

        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_path (`os.PathLike`):
                A path to a *directory* containing model weights saved using
                [`~diffusers.models.adapter.MultiAdapter.save_pretrained`], e.g., `./my_model_directory/adapter`.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype
                will be automatically derived from the model's weights.
            output_loading_info(`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
                A map that specifies where each submodule should go. It doesn't need to be refined to each
                parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
                same device.

                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
                more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
                GPU and the available CPU RAM if unset.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
                also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
                model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
                setting this argument to `True` will raise an error.
            variant (`str`, *optional*):
                If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
                ignored when using `from_flax`.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the
                `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from
                `safetensors` weights. If set to `False`, loading will *not* use `safetensors`.
        """
        idx = 0
        adapters = []

        # load adapter and append to list until no adapter directory exists anymore
        # first adapter has to be saved under `./mydirectory/adapter` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... adapters have to be saved under `./mydirectory/adapter_1`, `./mydirectory/adapter_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            adapter = T2IAdapter.from_pretrained(model_path_to_load, **kwargs)
            adapters.append(adapter)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(adapters)} adapters loaded from {pretrained_model_path}.")

        if len(adapters) == 0:
            raise ValueError(
                f"No T2IAdapters found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(adapters)


class T2IAdapter(ModelMixin, ConfigMixin):
    r"""
    A simple ResNet-like model that accepts images containing control signals such as keyposes and depth. The model
    generates multiple feature maps that are used as additional conditioning in [`UNet2DConditionModel`]. The model's
    architecture follows the original implementation of
    [Adapter](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L97)
    and
    [AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235).

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
    implements for all the model (such as downloading or saving, etc.)

    Parameters:
        in_channels (`int`, *optional*, defaults to 3):
            Number of channels of Aapter's input(*control image*). Set this parameter to 1 if you're using gray scale
            image as *control image*.
        channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The number of channel of each downsample block's output hidden state. The `len(block_out_channels)` will
            also determine the number of downsample blocks in the Adapter.
        num_res_blocks (`int`, *optional*, defaults to 2):
            Number of ResNet blocks in each downsample block.
        downscale_factor (`int`, *optional*, defaults to 8):
            A factor that determines the total downscale factor of the Adapter.
        adapter_type (`str`, *optional*, defaults to `full_adapter`):
            The type of Adapter to use. Choose either `full_adapter` or `full_adapter_xl` or `light_adapter`.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280, 1280],
        num_res_blocks: int = 2,
        downscale_factor: int = 8,
        adapter_type: str = "full_adapter",
    ):
        super().__init__()

        if adapter_type == "full_adapter":
            self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor)
        elif adapter_type == "full_adapter_xl":
            self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor)
        elif adapter_type == "light_adapter":
            self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor)
        else:
            raise ValueError(
                f"Unsupported adapter_type: '{adapter_type}'. Choose either 'full_adapter' or "
                "'full_adapter_xl' or 'light_adapter'."
            )

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        This function processes the input tensor `x` through the adapter model and returns a list of feature tensors,
        each representing information extracted at a different scale from the input. The length of the list is
        determined by the number of downsample blocks in the Adapter, as specified by the `channels` and
        `num_res_blocks` parameters during initialization.
        """
        return self.adapter(x)

    @property
    def total_downscale_factor(self):
        return self.adapter.total_downscale_factor

    @property
    def downscale_factor(self):
        """The downscale factor applied in the T2I-Adapter's initial pixel unshuffle operation. If an input image's dimensions are
        not evenly divisible by the downscale_factor then an exception will be raised.
        """
        return self.adapter.unshuffle.downscale_factor


# full adapter


class FullAdapter(nn.Module):
    r"""
    See [`T2IAdapter`] for more information.
    """

    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280, 1280],
        num_res_blocks: int = 2,
        downscale_factor: int = 8,
    ):
        super().__init__()

        in_channels = in_channels * downscale_factor**2

        self.unshuffle = nn.PixelUnshuffle(downscale_factor)
        self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1)

        self.body = nn.ModuleList(
            [
                AdapterBlock(channels[0], channels[0], num_res_blocks),
                *[
                    AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)
                    for i in range(1, len(channels))
                ],
            ]
        )

        self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        This method processes the input tensor `x` through the FullAdapter model and performs operations including
        pixel unshuffling, convolution, and a stack of AdapterBlocks. It returns a list of feature tensors, each
        capturing information at a different stage of processing within the FullAdapter model. The number of feature
        tensors in the list is determined by the number of downsample blocks specified during initialization.
        """
        x = self.unshuffle(x)
        x = self.conv_in(x)

        features = []

        for block in self.body:
            x = block(x)
            features.append(x)

        return features


class FullAdapterXL(nn.Module):
    r"""
    See [`T2IAdapter`] for more information.
    """

    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280, 1280],
        num_res_blocks: int = 2,
        downscale_factor: int = 16,
    ):
        super().__init__()

        in_channels = in_channels * downscale_factor**2

        self.unshuffle = nn.PixelUnshuffle(downscale_factor)
        self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1)

        self.body = []
        # blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32]
        for i in range(len(channels)):
            if i == 1:
                self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks))
            elif i == 2:
                self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True))
            else:
                self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks))

        self.body = nn.ModuleList(self.body)
        # XL has only one downsampling AdapterBlock.
        self.total_downscale_factor = downscale_factor * 2

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        This method takes the tensor x as input and processes it through FullAdapterXL model. It consists of operations
        including unshuffling pixels, applying convolution layer and appending each block into list of feature tensors.
        """
        x = self.unshuffle(x)
        x = self.conv_in(x)

        features = []

        for block in self.body:
            x = block(x)
            features.append(x)

        return features


class AdapterBlock(nn.Module):
    r"""
    An AdapterBlock is a helper model that contains multiple ResNet-like blocks. It is used in the `FullAdapter` and
    `FullAdapterXL` models.

    Parameters:
        in_channels (`int`):
            Number of channels of AdapterBlock's input.
        out_channels (`int`):
            Number of channels of AdapterBlock's output.
        num_res_blocks (`int`):
            Number of ResNet blocks in the AdapterBlock.
        down (`bool`, *optional*, defaults to `False`):
            Whether to perform downsampling on AdapterBlock's input.
    """

    def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
        super().__init__()

        self.downsample = None
        if down:
            self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True)

        self.in_conv = None
        if in_channels != out_channels:
            self.in_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

        self.resnets = nn.Sequential(
            *[AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)],
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        This method takes tensor x as input and performs operations downsampling and convolutional layers if the
        self.downsample and self.in_conv properties of AdapterBlock model are specified. Then it applies a series of
        residual blocks to the input tensor.
        """
        if self.downsample is not None:
            x = self.downsample(x)

        if self.in_conv is not None:
            x = self.in_conv(x)

        x = self.resnets(x)

        return x


class AdapterResnetBlock(nn.Module):
    r"""
    An `AdapterResnetBlock` is a helper model that implements a ResNet-like block.

    Parameters:
        channels (`int`):
            Number of channels of AdapterResnetBlock's input and output.
    """

    def __init__(self, channels: int):
        super().__init__()
        self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.act = nn.ReLU()
        self.block2 = nn.Conv2d(channels, channels, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        This method takes input tensor x and applies a convolutional layer, ReLU activation, and another convolutional
        layer on the input tensor. It returns addition with the input tensor.
        """

        h = self.act(self.block1(x))
        h = self.block2(h)

        return h + x


# light adapter


class LightAdapter(nn.Module):
    r"""
    See [`T2IAdapter`] for more information.
    """

    def __init__(
        self,
        in_channels: int = 3,
        channels: List[int] = [320, 640, 1280],
        num_res_blocks: int = 4,
        downscale_factor: int = 8,
    ):
        super().__init__()

        in_channels = in_channels * downscale_factor**2

        self.unshuffle = nn.PixelUnshuffle(downscale_factor)

        self.body = nn.ModuleList(
            [
                LightAdapterBlock(in_channels, channels[0], num_res_blocks),
                *[
                    LightAdapterBlock(channels[i], channels[i + 1], num_res_blocks, down=True)
                    for i in range(len(channels) - 1)
                ],
                LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True),
            ]
        )

        self.total_downscale_factor = downscale_factor * (2 ** len(channels))

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        r"""
        This method takes the input tensor x and performs downscaling and appends it in list of feature tensors. Each
        feature tensor corresponds to a different level of processing within the LightAdapter.
        """
        x = self.unshuffle(x)

        features = []

        for block in self.body:
            x = block(x)
            features.append(x)

        return features


class LightAdapterBlock(nn.Module):
    r"""
    A `LightAdapterBlock` is a helper model that contains multiple `LightAdapterResnetBlocks`. It is used in the
    `LightAdapter` model.

    Parameters:
        in_channels (`int`):
            Number of channels of LightAdapterBlock's input.
        out_channels (`int`):
            Number of channels of LightAdapterBlock's output.
        num_res_blocks (`int`):
            Number of LightAdapterResnetBlocks in the LightAdapterBlock.
        down (`bool`, *optional*, defaults to `False`):
            Whether to perform downsampling on LightAdapterBlock's input.
    """

    def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
        super().__init__()
        mid_channels = out_channels // 4

        self.downsample = None
        if down:
            self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True)

        self.in_conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1)
        self.resnets = nn.Sequential(*[LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)])
        self.out_conv = nn.Conv2d(mid_channels, out_channels, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        This method takes tensor x as input and performs downsampling if required. Then it applies in convolution
        layer, a sequence of residual blocks, and out convolutional layer.
        """
        if self.downsample is not None:
            x = self.downsample(x)

        x = self.in_conv(x)
        x = self.resnets(x)
        x = self.out_conv(x)

        return x


class LightAdapterResnetBlock(nn.Module):
    """
    A `LightAdapterResnetBlock` is a helper model that implements a ResNet-like block with a slightly different
    architecture than `AdapterResnetBlock`.

    Parameters:
        channels (`int`):
            Number of channels of LightAdapterResnetBlock's input and output.
    """

    def __init__(self, channels: int):
        super().__init__()
        self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.act = nn.ReLU()
        self.block2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r"""
        This function takes input tensor x and processes it through one convolutional layer, ReLU activation, and
        another convolutional layer and adds it to input tensor.
        """

        h = self.act(self.block1(x))
        h = self.block2(h)

        return h + x
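
A minimal sketch of the T2IAdapter interface added above, assuming the default `full_adapter` configuration and the vendored import path `diffusers.models.adapter`; the shapes are illustrative only and not part of the committed code:

import torch

from diffusers.models.adapter import T2IAdapter  # assumed vendored import path

# Default "full_adapter" config: pixel-unshuffle by 8, then three more 2x downsamplings,
# so a 512x512 control image yields four feature maps at 64, 32, 16 and 8 pixels.
adapter = T2IAdapter(in_channels=3, channels=[320, 640, 1280, 1280], num_res_blocks=2, downscale_factor=8)
features = adapter(torch.randn(1, 3, 512, 512))
print([tuple(f.shape) for f in features])
# [(1, 320, 64, 64), (1, 640, 32, 32), (1, 1280, 16, 16), (1, 1280, 8, 8)]
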
diffusers/models/attention.py
ADDED
@@ -0,0 +1,668 @@
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from typing import Any, Dict, Optional
|
15 |
+
|
16 |
+
import torch
|
17 |
+
import torch.nn.functional as F
|
18 |
+
from torch import nn
|
19 |
+
|
20 |
+
from ..utils import USE_PEFT_BACKEND
|
21 |
+
from ..utils.torch_utils import maybe_allow_in_graph
|
22 |
+
from .activations import GEGLU, GELU, ApproximateGELU
|
23 |
+
from .attention_processor import Attention
|
24 |
+
from .embeddings import SinusoidalPositionalEmbedding
|
25 |
+
from .lora import LoRACompatibleLinear
|
26 |
+
from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm
|
27 |
+
|
28 |
+
|
29 |
+
def _chunked_feed_forward(
|
30 |
+
ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int, lora_scale: Optional[float] = None
|
31 |
+
):
|
32 |
+
# "feed_forward_chunk_size" can be used to save memory
|
33 |
+
if hidden_states.shape[chunk_dim] % chunk_size != 0:
|
34 |
+
raise ValueError(
|
35 |
+
f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
|
36 |
+
)
|
37 |
+
|
38 |
+
num_chunks = hidden_states.shape[chunk_dim] // chunk_size
|
39 |
+
if lora_scale is None:
|
40 |
+
ff_output = torch.cat(
|
41 |
+
[ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
|
42 |
+
dim=chunk_dim,
|
43 |
+
)
|
44 |
+
else:
|
45 |
+
# TOOD(Patrick): LoRA scale can be removed once PEFT refactor is complete
|
46 |
+
ff_output = torch.cat(
|
47 |
+
[ff(hid_slice, scale=lora_scale) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
|
48 |
+
dim=chunk_dim,
|
49 |
+
)
|
50 |
+
|
51 |
+
return ff_output
|
52 |
+
|
53 |
+
|
54 |
+
@maybe_allow_in_graph
|
55 |
+
class GatedSelfAttentionDense(nn.Module):
|
56 |
+
r"""
|
57 |
+
A gated self-attention dense layer that combines visual features and object features.
|
58 |
+
|
59 |
+
Parameters:
|
60 |
+
query_dim (`int`): The number of channels in the query.
|
61 |
+
context_dim (`int`): The number of channels in the context.
|
62 |
+
n_heads (`int`): The number of heads to use for attention.
|
63 |
+
d_head (`int`): The number of channels in each head.
|
64 |
+
"""
|
65 |
+
|
66 |
+
def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):
|
67 |
+
super().__init__()
|
68 |
+
|
69 |
+
# we need a linear projection since we need cat visual feature and obj feature
|
70 |
+
self.linear = nn.Linear(context_dim, query_dim)
|
71 |
+
|
72 |
+
self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
|
73 |
+
self.ff = FeedForward(query_dim, activation_fn="geglu")
|
74 |
+
|
75 |
+
self.norm1 = nn.LayerNorm(query_dim)
|
76 |
+
self.norm2 = nn.LayerNorm(query_dim)
|
77 |
+
|
78 |
+
self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0)))
|
79 |
+
self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0)))
|
80 |
+
|
81 |
+
self.enabled = True
|
82 |
+
|
83 |
+
def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
|
84 |
+
if not self.enabled:
|
85 |
+
return x
|
86 |
+
|
87 |
+
n_visual = x.shape[1]
|
88 |
+
objs = self.linear(objs)
|
89 |
+
|
90 |
+
x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
|
91 |
+
x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))
|
92 |
+
|
93 |
+
return x
|
94 |
+
|
95 |
+
|
96 |
+
@maybe_allow_in_graph
|
97 |
+
class BasicTransformerBlock(nn.Module):
|
98 |
+
r"""
|
99 |
+
A basic Transformer block.
|
100 |
+
|
101 |
+
Parameters:
|
102 |
+
dim (`int`): The number of channels in the input and output.
|
103 |
+
num_attention_heads (`int`): The number of heads to use for multi-head attention.
|
104 |
+
attention_head_dim (`int`): The number of channels in each head.
|
105 |
+
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
|
106 |
+
cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
|
107 |
+
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
|
108 |
+
num_embeds_ada_norm (:
|
109 |
+
obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
|
110 |
+
attention_bias (:
|
111 |
+
obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
|
112 |
+
only_cross_attention (`bool`, *optional*):
|
113 |
+
Whether to use only cross-attention layers. In this case two cross attention layers are used.
|
114 |
+
double_self_attention (`bool`, *optional*):
|
115 |
+
Whether to use two self-attention layers. In this case no cross attention layers are used.
|
116 |
+
upcast_attention (`bool`, *optional*):
|
117 |
+
Whether to upcast the attention computation to float32. This is useful for mixed precision training.
|
118 |
+
norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
|
119 |
+
Whether to use learnable elementwise affine parameters for normalization.
|
120 |
+
norm_type (`str`, *optional*, defaults to `"layer_norm"`):
|
121 |
+
The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
|
122 |
+
final_dropout (`bool` *optional*, defaults to False):
|
123 |
+
Whether to apply a final dropout after the last feed-forward layer.
|
124 |
+
attention_type (`str`, *optional*, defaults to `"default"`):
|
125 |
+
The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
|
126 |
+
positional_embeddings (`str`, *optional*, defaults to `None`):
|
127 |
+
The type of positional embeddings to apply to.
|
128 |
+
num_positional_embeddings (`int`, *optional*, defaults to `None`):
|
129 |
+
The maximum number of positional embeddings to apply.
|
130 |
+
"""
|
131 |
+
|
132 |
+
def __init__(
|
133 |
+
self,
|
134 |
+
dim: int,
|
135 |
+
num_attention_heads: int,
|
136 |
+
attention_head_dim: int,
|
137 |
+
dropout=0.0,
|
138 |
+
cross_attention_dim: Optional[int] = None,
|
139 |
+
activation_fn: str = "geglu",
|
140 |
+
num_embeds_ada_norm: Optional[int] = None,
|
141 |
+
attention_bias: bool = False,
|
142 |
+
only_cross_attention: bool = False,
|
143 |
+
double_self_attention: bool = False,
|
144 |
+
upcast_attention: bool = False,
|
145 |
+
norm_elementwise_affine: bool = True,
|
146 |
+
norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single'
|
147 |
+
norm_eps: float = 1e-5,
|
148 |
+
final_dropout: bool = False,
|
149 |
+
attention_type: str = "default",
|
150 |
+
positional_embeddings: Optional[str] = None,
|
151 |
+
num_positional_embeddings: Optional[int] = None,
|
152 |
+
ada_norm_continous_conditioning_embedding_dim: Optional[int] = None,
|
153 |
+
ada_norm_bias: Optional[int] = None,
|
154 |
+
ff_inner_dim: Optional[int] = None,
|
155 |
+
ff_bias: bool = True,
|
156 |
+
attention_out_bias: bool = True,
|
157 |
+
):
|
158 |
+
super().__init__()
|
159 |
+
self.only_cross_attention = only_cross_attention
|
160 |
+
|
161 |
+
self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
|
162 |
+
self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
|
163 |
+
self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
|
164 |
+
self.use_layer_norm = norm_type == "layer_norm"
|
165 |
+
self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous"
|
166 |
+
|
167 |
+
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
|
168 |
+
raise ValueError(
|
169 |
+
f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
|
170 |
+
f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
|
171 |
+
)
|
172 |
+
|
173 |
+
if positional_embeddings and (num_positional_embeddings is None):
|
174 |
+
raise ValueError(
|
175 |
+
"If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined."
|
176 |
+
)
|
177 |
+
|
178 |
+
if positional_embeddings == "sinusoidal":
|
179 |
+
self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
|
180 |
+
else:
|
181 |
+
self.pos_embed = None
|
182 |
+
|
183 |
+
# Define 3 blocks. Each block has its own normalization layer.
|
184 |
+
# 1. Self-Attn
|
185 |
+
if self.use_ada_layer_norm:
|
186 |
+
self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
|
187 |
+
elif self.use_ada_layer_norm_zero:
|
188 |
+
self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
|
189 |
+
elif self.use_ada_layer_norm_continuous:
|
190 |
+
self.norm1 = AdaLayerNormContinuous(
|
191 |
+
dim,
|
192 |
+
ada_norm_continous_conditioning_embedding_dim,
|
193 |
+
norm_elementwise_affine,
|
194 |
+
norm_eps,
|
195 |
+
ada_norm_bias,
|
196 |
+
"rms_norm",
|
197 |
+
)
|
198 |
+
else:
|
199 |
+
self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
|
200 |
+
|
201 |
+
self.attn1 = Attention(
|
202 |
+
query_dim=dim,
|
203 |
+
heads=num_attention_heads,
|
204 |
+
dim_head=attention_head_dim,
|
205 |
+
dropout=dropout,
|
206 |
+
bias=attention_bias,
|
207 |
+
cross_attention_dim=cross_attention_dim if only_cross_attention else None,
|
208 |
+
upcast_attention=upcast_attention,
|
209 |
+
out_bias=attention_out_bias,
|
210 |
+
)
|
211 |
+
|
212 |
+
# 2. Cross-Attn
|
213 |
+
if cross_attention_dim is not None or double_self_attention:
|
214 |
+
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
|
215 |
+
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
|
216 |
+
# the second cross attention block.
|
217 |
+
if self.use_ada_layer_norm:
|
218 |
+
self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm)
|
219 |
+
elif self.use_ada_layer_norm_continuous:
|
220 |
+
self.norm2 = AdaLayerNormContinuous(
|
221 |
+
dim,
|
222 |
+
ada_norm_continous_conditioning_embedding_dim,
|
223 |
+
norm_elementwise_affine,
|
224 |
+
norm_eps,
|
225 |
+
ada_norm_bias,
|
226 |
+
"rms_norm",
|
227 |
+
)
|
228 |
+
else:
|
229 |
+
self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
|
230 |
+
|
231 |
+
self.attn2 = Attention(
|
232 |
+
query_dim=dim,
|
233 |
+
cross_attention_dim=cross_attention_dim if not double_self_attention else None,
|
234 |
+
heads=num_attention_heads,
|
235 |
+
dim_head=attention_head_dim,
|
236 |
+
dropout=dropout,
|
237 |
+
bias=attention_bias,
|
238 |
+
upcast_attention=upcast_attention,
|
239 |
+
out_bias=attention_out_bias,
|
240 |
+
) # is self-attn if encoder_hidden_states is none
|
241 |
+
else:
|
242 |
+
self.norm2 = None
|
243 |
+
self.attn2 = None
|
244 |
+
|
245 |
+
# 3. Feed-forward
|
246 |
+
if self.use_ada_layer_norm_continuous:
|
247 |
+
self.norm3 = AdaLayerNormContinuous(
|
248 |
+
dim,
|
249 |
+
ada_norm_continous_conditioning_embedding_dim,
|
250 |
+
norm_elementwise_affine,
|
251 |
+
norm_eps,
|
252 |
+
ada_norm_bias,
|
253 |
+
"layer_norm",
|
254 |
+
)
|
255 |
+
elif not self.use_ada_layer_norm_single:
|
256 |
+
self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
|
257 |
+
|
258 |
+
self.ff = FeedForward(
|
259 |
+
dim,
|
260 |
+
dropout=dropout,
|
261 |
+
activation_fn=activation_fn,
|
262 |
+
final_dropout=final_dropout,
|
263 |
+
inner_dim=ff_inner_dim,
|
264 |
+
bias=ff_bias,
|
265 |
+
)
|
266 |
+
|
267 |
+
# 4. Fuser
|
268 |
+
if attention_type == "gated" or attention_type == "gated-text-image":
|
269 |
+
self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)
|
270 |
+
|
271 |
+
# 5. Scale-shift for PixArt-Alpha.
|
272 |
+
if self.use_ada_layer_norm_single:
|
273 |
+
self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)
|
274 |
+
|
275 |
+
# let chunk size default to None
|
276 |
+
self._chunk_size = None
|
277 |
+
self._chunk_dim = 0
|
278 |
+
|
279 |
+
def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
|
280 |
+
# Sets chunk feed-forward
|
281 |
+
self._chunk_size = chunk_size
|
282 |
+
self._chunk_dim = dim
|
283 |
+
|
284 |
+
def forward(
|
285 |
+
self,
|
286 |
+
hidden_states: torch.FloatTensor,
|
287 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
288 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
289 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
290 |
+
timestep: Optional[torch.LongTensor] = None,
|
291 |
+
cross_attention_kwargs: Dict[str, Any] = None,
|
292 |
+
class_labels: Optional[torch.LongTensor] = None,
|
293 |
+
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
|
294 |
+
) -> torch.FloatTensor:
|
295 |
+
# Notice that normalization is always applied before the real computation in the following blocks.
|
296 |
+
# 0. Self-Attention
|
297 |
+
batch_size = hidden_states.shape[0]
|
298 |
+
|
299 |
+
if self.use_ada_layer_norm:
|
300 |
+
norm_hidden_states = self.norm1(hidden_states, timestep)
|
301 |
+
elif self.use_ada_layer_norm_zero:
|
302 |
+
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
|
303 |
+
hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
|
304 |
+
)
|
305 |
+
elif self.use_layer_norm:
|
306 |
+
norm_hidden_states = self.norm1(hidden_states)
|
307 |
+
elif self.use_ada_layer_norm_continuous:
|
308 |
+
norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"])
|
309 |
+
elif self.use_ada_layer_norm_single:
|
310 |
+
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
|
311 |
+
self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
|
312 |
+
).chunk(6, dim=1)
|
313 |
+
norm_hidden_states = self.norm1(hidden_states)
|
314 |
+
norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
|
315 |
+
norm_hidden_states = norm_hidden_states.squeeze(1)
|
316 |
+
else:
|
317 |
+
raise ValueError("Incorrect norm used")
|
318 |
+
|
319 |
+
if self.pos_embed is not None:
|
320 |
+
norm_hidden_states = self.pos_embed(norm_hidden_states)
|
321 |
+
|
322 |
+
# 1. Retrieve lora scale.
|
323 |
+
lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
|
324 |
+
|
325 |
+
# 2. Prepare GLIGEN inputs
|
326 |
+
cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
|
327 |
+
gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
|
328 |
+
|
329 |
+
attn_output = self.attn1(
|
330 |
+
norm_hidden_states,
|
331 |
+
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
|
332 |
+
attention_mask=attention_mask,
|
333 |
+
**cross_attention_kwargs,
|
334 |
+
)
|
335 |
+
if self.use_ada_layer_norm_zero:
|
336 |
+
attn_output = gate_msa.unsqueeze(1) * attn_output
|
337 |
+
elif self.use_ada_layer_norm_single:
|
338 |
+
attn_output = gate_msa * attn_output
|
339 |
+
|
340 |
+
hidden_states = attn_output + hidden_states
|
341 |
+
if hidden_states.ndim == 4:
|
342 |
+
hidden_states = hidden_states.squeeze(1)
|
343 |
+
|
344 |
+
# 2.5 GLIGEN Control
|
345 |
+
if gligen_kwargs is not None:
|
346 |
+
hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
|
347 |
+
|
348 |
+
# 3. Cross-Attention
|
349 |
+
if self.attn2 is not None:
|
350 |
+
if self.use_ada_layer_norm:
|
351 |
+
norm_hidden_states = self.norm2(hidden_states, timestep)
|
352 |
+
elif self.use_ada_layer_norm_zero or self.use_layer_norm:
|
353 |
+
norm_hidden_states = self.norm2(hidden_states)
|
354 |
+
elif self.use_ada_layer_norm_single:
|
355 |
+
# For PixArt norm2 isn't applied here:
|
356 |
+
# https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
|
357 |
+
norm_hidden_states = hidden_states
|
358 |
+
elif self.use_ada_layer_norm_continuous:
|
359 |
+
norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"])
|
360 |
+
else:
|
361 |
+
raise ValueError("Incorrect norm")
|
362 |
+
|
363 |
+
if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
|
364 |
+
norm_hidden_states = self.pos_embed(norm_hidden_states)
|
365 |
+
|
366 |
+
attn_output = self.attn2(
|
367 |
+
norm_hidden_states,
|
368 |
+
encoder_hidden_states=encoder_hidden_states,
|
369 |
+
attention_mask=encoder_attention_mask,
|
370 |
+
**cross_attention_kwargs,
|
371 |
+
)
|
372 |
+
hidden_states = attn_output + hidden_states
|
373 |
+
|
374 |
+
# 4. Feed-forward
|
375 |
+
if self.use_ada_layer_norm_continuous:
|
376 |
+
norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"])
|
377 |
+
elif not self.use_ada_layer_norm_single:
|
378 |
+
norm_hidden_states = self.norm3(hidden_states)
|
379 |
+
|
380 |
+
if self.use_ada_layer_norm_zero:
|
381 |
+
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
|
382 |
+
|
383 |
+
if self.use_ada_layer_norm_single:
|
384 |
+
norm_hidden_states = self.norm2(hidden_states)
|
385 |
+
norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
|
386 |
+
|
387 |
+
if self._chunk_size is not None:
|
388 |
+
# "feed_forward_chunk_size" can be used to save memory
|
389 |
+
ff_output = _chunked_feed_forward(
|
390 |
+
self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size, lora_scale=lora_scale
|
391 |
+
)
|
392 |
+
else:
|
393 |
+
ff_output = self.ff(norm_hidden_states, scale=lora_scale)
|
394 |
+
|
395 |
+
if self.use_ada_layer_norm_zero:
|
396 |
+
ff_output = gate_mlp.unsqueeze(1) * ff_output
|
397 |
+
elif self.use_ada_layer_norm_single:
|
398 |
+
ff_output = gate_mlp * ff_output
|
399 |
+
|
400 |
+
hidden_states = ff_output + hidden_states
|
401 |
+
if hidden_states.ndim == 4:
|
402 |
+
hidden_states = hidden_states.squeeze(1)
|
403 |
+
|
404 |
+
return hidden_states
|
405 |
+
|
406 |
+
|
407 |
+
@maybe_allow_in_graph
|
408 |
+
class TemporalBasicTransformerBlock(nn.Module):
|
409 |
+
r"""
|
410 |
+
A basic Transformer block for video like data.
|
411 |
+
|
412 |
+
Parameters:
|
413 |
+
dim (`int`): The number of channels in the input and output.
|
414 |
+
time_mix_inner_dim (`int`): The number of channels for temporal attention.
|
415 |
+
num_attention_heads (`int`): The number of heads to use for multi-head attention.
|
416 |
+
attention_head_dim (`int`): The number of channels in each head.
|
417 |
+
cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
|
418 |
+
"""
|
419 |
+
|
420 |
+
def __init__(
|
421 |
+
self,
|
422 |
+
dim: int,
|
423 |
+
time_mix_inner_dim: int,
|
424 |
+
num_attention_heads: int,
|
425 |
+
attention_head_dim: int,
|
426 |
+
cross_attention_dim: Optional[int] = None,
|
427 |
+
):
|
428 |
+
super().__init__()
|
429 |
+
self.is_res = dim == time_mix_inner_dim
|
430 |
+
|
431 |
+
self.norm_in = nn.LayerNorm(dim)
|
432 |
+
|
433 |
+
# Define 3 blocks. Each block has its own normalization layer.
|
434 |
+
# 1. Self-Attn
|
435 |
+
self.norm_in = nn.LayerNorm(dim)
|
436 |
+
self.ff_in = FeedForward(
|
437 |
+
dim,
|
438 |
+
dim_out=time_mix_inner_dim,
|
439 |
+
activation_fn="geglu",
|
440 |
+
)
|
441 |
+
|
442 |
+
self.norm1 = nn.LayerNorm(time_mix_inner_dim)
|
443 |
+
self.attn1 = Attention(
|
444 |
+
query_dim=time_mix_inner_dim,
|
445 |
+
heads=num_attention_heads,
|
446 |
+
dim_head=attention_head_dim,
|
447 |
+
cross_attention_dim=None,
|
448 |
+
)
|
449 |
+
|
450 |
+
# 2. Cross-Attn
|
451 |
+
if cross_attention_dim is not None:
|
452 |
+
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
|
453 |
+
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
|
454 |
+
# the second cross attention block.
|
455 |
+
self.norm2 = nn.LayerNorm(time_mix_inner_dim)
|
456 |
+
self.attn2 = Attention(
|
457 |
+
query_dim=time_mix_inner_dim,
|
458 |
+
cross_attention_dim=cross_attention_dim,
|
459 |
+
heads=num_attention_heads,
|
460 |
+
dim_head=attention_head_dim,
|
461 |
+
) # is self-attn if encoder_hidden_states is none
|
462 |
+
else:
|
463 |
+
self.norm2 = None
|
464 |
+
self.attn2 = None
|
465 |
+
|
466 |
+
# 3. Feed-forward
|
467 |
+
self.norm3 = nn.LayerNorm(time_mix_inner_dim)
|
468 |
+
self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu")
|
469 |
+
|
470 |
+
# let chunk size default to None
|
471 |
+
self._chunk_size = None
|
472 |
+
self._chunk_dim = None
|
473 |
+
|
474 |
+
def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs):
|
475 |
+
# Sets chunk feed-forward
|
476 |
+
self._chunk_size = chunk_size
|
477 |
+
# chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off
|
478 |
+
self._chunk_dim = 1
|
479 |
+
|
480 |
+
def forward(
|
481 |
+
self,
|
482 |
+
hidden_states: torch.FloatTensor,
|
483 |
+
num_frames: int,
|
484 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
485 |
+
) -> torch.FloatTensor:
|
486 |
+
# Notice that normalization is always applied before the real computation in the following blocks.
|
487 |
+
# 0. Self-Attention
|
488 |
+
batch_size = hidden_states.shape[0]
|
489 |
+
|
490 |
+
batch_frames, seq_length, channels = hidden_states.shape
|
491 |
+
batch_size = batch_frames // num_frames
|
492 |
+
|
493 |
+
hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels)
|
494 |
+
hidden_states = hidden_states.permute(0, 2, 1, 3)
|
495 |
+
hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels)
|
496 |
+
|
497 |
+
residual = hidden_states
|
498 |
+
hidden_states = self.norm_in(hidden_states)
|
499 |
+
|
500 |
+
if self._chunk_size is not None:
|
501 |
+
hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size)
|
502 |
+
else:
|
503 |
+
hidden_states = self.ff_in(hidden_states)
|
504 |
+
|
505 |
+
if self.is_res:
|
506 |
+
hidden_states = hidden_states + residual
|
507 |
+
|
508 |
+
norm_hidden_states = self.norm1(hidden_states)
|
509 |
+
attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None)
|
510 |
+
hidden_states = attn_output + hidden_states
|
511 |
+
|
512 |
+
# 3. Cross-Attention
|
513 |
+
if self.attn2 is not None:
|
514 |
+
norm_hidden_states = self.norm2(hidden_states)
|
515 |
+
attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states)
|
516 |
+
hidden_states = attn_output + hidden_states
|
517 |
+
|
518 |
+
# 4. Feed-forward
|
519 |
+
norm_hidden_states = self.norm3(hidden_states)
|
520 |
+
|
521 |
+
if self._chunk_size is not None:
|
522 |
+
ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
|
523 |
+
else:
|
524 |
+
ff_output = self.ff(norm_hidden_states)
|
525 |
+
|
526 |
+
if self.is_res:
|
527 |
+
hidden_states = ff_output + hidden_states
|
528 |
+
else:
|
529 |
+
hidden_states = ff_output
|
530 |
+
|
531 |
+
hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels)
|
532 |
+
hidden_states = hidden_states.permute(0, 2, 1, 3)
|
533 |
+
hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels)
|
534 |
+
|
535 |
+
return hidden_states
|
536 |
+
|
537 |
+
|
538 |
+
class SkipFFTransformerBlock(nn.Module):
|
539 |
+
def __init__(
|
540 |
+
self,
|
541 |
+
dim: int,
|
542 |
+
num_attention_heads: int,
|
543 |
+
attention_head_dim: int,
|
544 |
+
kv_input_dim: int,
|
545 |
+
kv_input_dim_proj_use_bias: bool,
|
546 |
+
dropout=0.0,
|
547 |
+
cross_attention_dim: Optional[int] = None,
|
548 |
+
attention_bias: bool = False,
|
549 |
+
attention_out_bias: bool = True,
|
550 |
+
):
|
551 |
+
super().__init__()
|
552 |
+
if kv_input_dim != dim:
|
553 |
+
self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias)
|
554 |
+
else:
|
555 |
+
self.kv_mapper = None
|
556 |
+
|
557 |
+
self.norm1 = RMSNorm(dim, 1e-06)
|
558 |
+
|
559 |
+
self.attn1 = Attention(
|
560 |
+
query_dim=dim,
|
561 |
+
heads=num_attention_heads,
|
562 |
+
dim_head=attention_head_dim,
|
563 |
+
dropout=dropout,
|
564 |
+
bias=attention_bias,
|
565 |
+
cross_attention_dim=cross_attention_dim,
|
566 |
+
out_bias=attention_out_bias,
|
567 |
+
)
|
568 |
+
|
569 |
+
self.norm2 = RMSNorm(dim, 1e-06)
|
570 |
+
|
571 |
+
self.attn2 = Attention(
|
572 |
+
query_dim=dim,
|
573 |
+
cross_attention_dim=cross_attention_dim,
|
574 |
+
heads=num_attention_heads,
|
575 |
+
dim_head=attention_head_dim,
|
576 |
+
dropout=dropout,
|
577 |
+
bias=attention_bias,
|
578 |
+
out_bias=attention_out_bias,
|
579 |
+
)
|
580 |
+
|
581 |
+
def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs):
|
582 |
+
cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
|
583 |
+
|
584 |
+
if self.kv_mapper is not None:
|
585 |
+
encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states))
|
586 |
+
|
587 |
+
norm_hidden_states = self.norm1(hidden_states)
|
588 |
+
|
589 |
+
attn_output = self.attn1(
|
590 |
+
norm_hidden_states,
|
591 |
+
encoder_hidden_states=encoder_hidden_states,
|
592 |
+
**cross_attention_kwargs,
|
593 |
+
)
|
594 |
+
|
595 |
+
hidden_states = attn_output + hidden_states
|
596 |
+
|
597 |
+
norm_hidden_states = self.norm2(hidden_states)
|
598 |
+
|
599 |
+
attn_output = self.attn2(
|
600 |
+
norm_hidden_states,
|
601 |
+
encoder_hidden_states=encoder_hidden_states,
|
602 |
+
**cross_attention_kwargs,
|
603 |
+
)
|
604 |
+
|
605 |
+
hidden_states = attn_output + hidden_states
|
606 |
+
|
607 |
+
return hidden_states
|
608 |
+
|
609 |
+
|
610 |
+
class FeedForward(nn.Module):
|
611 |
+
r"""
|
612 |
+
A feed-forward layer.
|
613 |
+
|
614 |
+
Parameters:
|
615 |
+
dim (`int`): The number of channels in the input.
|
616 |
+
dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
|
617 |
+
mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
|
618 |
+
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
|
619 |
+
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
|
620 |
+
final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
|
621 |
+
bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
|
622 |
+
"""
|
623 |
+
|
624 |
+
def __init__(
|
625 |
+
self,
|
626 |
+
dim: int,
|
627 |
+
dim_out: Optional[int] = None,
|
628 |
+
mult: int = 4,
|
629 |
+
dropout: float = 0.0,
|
630 |
+
activation_fn: str = "geglu",
|
631 |
+
final_dropout: bool = False,
|
632 |
+
inner_dim=None,
|
633 |
+
bias: bool = True,
|
634 |
+
):
|
635 |
+
super().__init__()
|
636 |
+
if inner_dim is None:
|
637 |
+
inner_dim = int(dim * mult)
|
638 |
+
dim_out = dim_out if dim_out is not None else dim
|
639 |
+
linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear
|
640 |
+
|
641 |
+
if activation_fn == "gelu":
|
642 |
+
act_fn = GELU(dim, inner_dim, bias=bias)
|
643 |
+
if activation_fn == "gelu-approximate":
|
644 |
+
act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
|
645 |
+
elif activation_fn == "geglu":
|
646 |
+
act_fn = GEGLU(dim, inner_dim, bias=bias)
|
647 |
+
elif activation_fn == "geglu-approximate":
|
648 |
+
act_fn = ApproximateGELU(dim, inner_dim, bias=bias)
|
649 |
+
|
650 |
+
self.net = nn.ModuleList([])
|
651 |
+
# project in
|
652 |
+
self.net.append(act_fn)
|
653 |
+
# project dropout
|
654 |
+
self.net.append(nn.Dropout(dropout))
|
655 |
+
# project out
|
656 |
+
self.net.append(linear_cls(inner_dim, dim_out, bias=bias))
|
657 |
+
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
|
658 |
+
if final_dropout:
|
659 |
+
self.net.append(nn.Dropout(dropout))
|
660 |
+
|
661 |
+
def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
|
662 |
+
compatible_cls = (GEGLU,) if USE_PEFT_BACKEND else (GEGLU, LoRACompatibleLinear)
|
663 |
+
for module in self.net:
|
664 |
+
if isinstance(module, compatible_cls):
|
665 |
+
hidden_states = module(hidden_states, scale)
|
666 |
+
else:
|
667 |
+
hidden_states = module(hidden_states)
|
668 |
+
return hidden_states
|
diffusers/models/attention_flax.py
ADDED
@@ -0,0 +1,494 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import functools
|
16 |
+
import math
|
17 |
+
|
18 |
+
import flax.linen as nn
|
19 |
+
import jax
|
20 |
+
import jax.numpy as jnp
|
21 |
+
|
22 |
+
|
23 |
+
def _query_chunk_attention(query, key, value, precision, key_chunk_size: int = 4096):
|
24 |
+
"""Multi-head dot product attention with a limited number of queries."""
|
25 |
+
num_kv, num_heads, k_features = key.shape[-3:]
|
26 |
+
v_features = value.shape[-1]
|
27 |
+
key_chunk_size = min(key_chunk_size, num_kv)
|
28 |
+
query = query / jnp.sqrt(k_features)
|
29 |
+
|
30 |
+
@functools.partial(jax.checkpoint, prevent_cse=False)
|
31 |
+
def summarize_chunk(query, key, value):
|
32 |
+
attn_weights = jnp.einsum("...qhd,...khd->...qhk", query, key, precision=precision)
|
33 |
+
|
34 |
+
max_score = jnp.max(attn_weights, axis=-1, keepdims=True)
|
35 |
+
max_score = jax.lax.stop_gradient(max_score)
|
36 |
+
exp_weights = jnp.exp(attn_weights - max_score)
|
37 |
+
|
38 |
+
exp_values = jnp.einsum("...vhf,...qhv->...qhf", value, exp_weights, precision=precision)
|
39 |
+
max_score = jnp.einsum("...qhk->...qh", max_score)
|
40 |
+
|
41 |
+
return (exp_values, exp_weights.sum(axis=-1), max_score)
|
42 |
+
|
43 |
+
def chunk_scanner(chunk_idx):
|
44 |
+
# julienne key array
|
45 |
+
key_chunk = jax.lax.dynamic_slice(
|
46 |
+
operand=key,
|
47 |
+
start_indices=[0] * (key.ndim - 3) + [chunk_idx, 0, 0], # [...,k,h,d]
|
48 |
+
slice_sizes=list(key.shape[:-3]) + [key_chunk_size, num_heads, k_features], # [...,k,h,d]
|
49 |
+
)
|
50 |
+
|
51 |
+
# julienne value array
|
52 |
+
value_chunk = jax.lax.dynamic_slice(
|
53 |
+
operand=value,
|
54 |
+
start_indices=[0] * (value.ndim - 3) + [chunk_idx, 0, 0], # [...,v,h,d]
|
55 |
+
slice_sizes=list(value.shape[:-3]) + [key_chunk_size, num_heads, v_features], # [...,v,h,d]
|
56 |
+
)
|
57 |
+
|
58 |
+
return summarize_chunk(query, key_chunk, value_chunk)
|
59 |
+
|
60 |
+
chunk_values, chunk_weights, chunk_max = jax.lax.map(f=chunk_scanner, xs=jnp.arange(0, num_kv, key_chunk_size))
|
61 |
+
|
62 |
+
global_max = jnp.max(chunk_max, axis=0, keepdims=True)
|
63 |
+
max_diffs = jnp.exp(chunk_max - global_max)
|
64 |
+
|
65 |
+
chunk_values *= jnp.expand_dims(max_diffs, axis=-1)
|
66 |
+
chunk_weights *= max_diffs
|
67 |
+
|
68 |
+
all_values = chunk_values.sum(axis=0)
|
69 |
+
all_weights = jnp.expand_dims(chunk_weights, -1).sum(axis=0)
|
70 |
+
|
71 |
+
return all_values / all_weights
|
72 |
+
|
73 |
+
|
74 |
+
def jax_memory_efficient_attention(
|
75 |
+
query, key, value, precision=jax.lax.Precision.HIGHEST, query_chunk_size: int = 1024, key_chunk_size: int = 4096
|
76 |
+
):
|
77 |
+
r"""
|
78 |
+
Flax Memory-efficient multi-head dot product attention. https://arxiv.org/abs/2112.05682v2
|
79 |
+
https://github.com/AminRezaei0x443/memory-efficient-attention
|
80 |
+
|
81 |
+
Args:
|
82 |
+
query (`jnp.ndarray`): (batch..., query_length, head, query_key_depth_per_head)
|
83 |
+
key (`jnp.ndarray`): (batch..., key_value_length, head, query_key_depth_per_head)
|
84 |
+
value (`jnp.ndarray`): (batch..., key_value_length, head, value_depth_per_head)
|
85 |
+
precision (`jax.lax.Precision`, *optional*, defaults to `jax.lax.Precision.HIGHEST`):
|
86 |
+
numerical precision for computation
|
87 |
+
query_chunk_size (`int`, *optional*, defaults to 1024):
|
88 |
+
chunk size to divide query array value must divide query_length equally without remainder
|
89 |
+
key_chunk_size (`int`, *optional*, defaults to 4096):
|
90 |
+
chunk size to divide key and value array value must divide key_value_length equally without remainder
|
91 |
+
|
92 |
+
Returns:
|
93 |
+
(`jnp.ndarray`) with shape of (batch..., query_length, head, value_depth_per_head)
|
94 |
+
"""
|
95 |
+
num_q, num_heads, q_features = query.shape[-3:]
|
96 |
+
|
97 |
+
def chunk_scanner(chunk_idx, _):
|
98 |
+
# julienne query array
|
99 |
+
query_chunk = jax.lax.dynamic_slice(
|
100 |
+
operand=query,
|
101 |
+
start_indices=([0] * (query.ndim - 3)) + [chunk_idx, 0, 0], # [...,q,h,d]
|
102 |
+
slice_sizes=list(query.shape[:-3]) + [min(query_chunk_size, num_q), num_heads, q_features], # [...,q,h,d]
|
103 |
+
)
|
104 |
+
|
105 |
+
return (
|
106 |
+
chunk_idx + query_chunk_size, # unused ignore it
|
107 |
+
_query_chunk_attention(
|
108 |
+
query=query_chunk, key=key, value=value, precision=precision, key_chunk_size=key_chunk_size
|
109 |
+
),
|
110 |
+
)
|
111 |
+
|
112 |
+
_, res = jax.lax.scan(
|
113 |
+
f=chunk_scanner,
|
114 |
+
init=0,
|
115 |
+
xs=None,
|
116 |
+
length=math.ceil(num_q / query_chunk_size), # start counter # stop counter
|
117 |
+
)
|
118 |
+
|
119 |
+
return jnp.concatenate(res, axis=-3) # fuse the chunked result back
|
120 |
+
|
121 |
+
|
122 |
+
class FlaxAttention(nn.Module):
|
123 |
+
r"""
|
124 |
+
A Flax multi-head attention module as described in: https://arxiv.org/abs/1706.03762
|
125 |
+
|
126 |
+
Parameters:
|
127 |
+
query_dim (:obj:`int`):
|
128 |
+
Input hidden states dimension
|
129 |
+
heads (:obj:`int`, *optional*, defaults to 8):
|
130 |
+
Number of heads
|
131 |
+
dim_head (:obj:`int`, *optional*, defaults to 64):
|
132 |
+
Hidden states dimension inside each head
|
133 |
+
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
134 |
+
Dropout rate
|
135 |
+
use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
|
136 |
+
enable memory efficient attention https://arxiv.org/abs/2112.05682
|
137 |
+
split_head_dim (`bool`, *optional*, defaults to `False`):
|
138 |
+
Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
|
139 |
+
enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
|
140 |
+
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
141 |
+
Parameters `dtype`
|
142 |
+
|
143 |
+
"""
|
144 |
+
|
145 |
+
query_dim: int
|
146 |
+
heads: int = 8
|
147 |
+
dim_head: int = 64
|
148 |
+
dropout: float = 0.0
|
149 |
+
use_memory_efficient_attention: bool = False
|
150 |
+
split_head_dim: bool = False
|
151 |
+
dtype: jnp.dtype = jnp.float32
|
152 |
+
|
153 |
+
def setup(self):
|
154 |
+
inner_dim = self.dim_head * self.heads
|
155 |
+
self.scale = self.dim_head**-0.5
|
156 |
+
|
157 |
+
# Weights were exported with old names {to_q, to_k, to_v, to_out}
|
158 |
+
self.query = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_q")
|
159 |
+
self.key = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_k")
|
160 |
+
self.value = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_v")
|
161 |
+
|
162 |
+
self.proj_attn = nn.Dense(self.query_dim, dtype=self.dtype, name="to_out_0")
|
163 |
+
self.dropout_layer = nn.Dropout(rate=self.dropout)
|
164 |
+
|
165 |
+
def reshape_heads_to_batch_dim(self, tensor):
|
166 |
+
batch_size, seq_len, dim = tensor.shape
|
167 |
+
head_size = self.heads
|
168 |
+
tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
|
169 |
+
tensor = jnp.transpose(tensor, (0, 2, 1, 3))
|
170 |
+
tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)
|
171 |
+
return tensor
|
172 |
+
|
173 |
+
def reshape_batch_dim_to_heads(self, tensor):
|
174 |
+
batch_size, seq_len, dim = tensor.shape
|
175 |
+
head_size = self.heads
|
176 |
+
tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
|
177 |
+
tensor = jnp.transpose(tensor, (0, 2, 1, 3))
|
178 |
+
tensor = tensor.reshape(batch_size // head_size, seq_len, dim * head_size)
|
179 |
+
return tensor
|
180 |
+
|
181 |
+
def __call__(self, hidden_states, context=None, deterministic=True):
|
182 |
+
context = hidden_states if context is None else context
|
183 |
+
|
184 |
+
query_proj = self.query(hidden_states)
|
185 |
+
key_proj = self.key(context)
|
186 |
+
value_proj = self.value(context)
|
187 |
+
|
188 |
+
if self.split_head_dim:
|
189 |
+
b = hidden_states.shape[0]
|
190 |
+
query_states = jnp.reshape(query_proj, (b, -1, self.heads, self.dim_head))
|
191 |
+
key_states = jnp.reshape(key_proj, (b, -1, self.heads, self.dim_head))
|
192 |
+
value_states = jnp.reshape(value_proj, (b, -1, self.heads, self.dim_head))
|
193 |
+
else:
|
194 |
+
query_states = self.reshape_heads_to_batch_dim(query_proj)
|
195 |
+
key_states = self.reshape_heads_to_batch_dim(key_proj)
|
196 |
+
value_states = self.reshape_heads_to_batch_dim(value_proj)
|
197 |
+
|
198 |
+
if self.use_memory_efficient_attention:
|
199 |
+
query_states = query_states.transpose(1, 0, 2)
|
200 |
+
key_states = key_states.transpose(1, 0, 2)
|
201 |
+
value_states = value_states.transpose(1, 0, 2)
|
202 |
+
|
203 |
+
# this if statement create a chunk size for each layer of the unet
|
204 |
+
# the chunk size is equal to the query_length dimension of the deepest layer of the unet
|
205 |
+
|
206 |
+
flatten_latent_dim = query_states.shape[-3]
|
207 |
+
if flatten_latent_dim % 64 == 0:
|
208 |
+
query_chunk_size = int(flatten_latent_dim / 64)
|
209 |
+
elif flatten_latent_dim % 16 == 0:
|
210 |
+
query_chunk_size = int(flatten_latent_dim / 16)
|
211 |
+
elif flatten_latent_dim % 4 == 0:
|
212 |
+
query_chunk_size = int(flatten_latent_dim / 4)
|
213 |
+
else:
|
214 |
+
query_chunk_size = int(flatten_latent_dim)
|
215 |
+
|
216 |
+
hidden_states = jax_memory_efficient_attention(
|
217 |
+
query_states, key_states, value_states, query_chunk_size=query_chunk_size, key_chunk_size=4096 * 4
|
218 |
+
)
|
219 |
+
|
220 |
+
hidden_states = hidden_states.transpose(1, 0, 2)
|
221 |
+
else:
|
222 |
+
# compute attentions
|
223 |
+
if self.split_head_dim:
|
224 |
+
attention_scores = jnp.einsum("b t n h, b f n h -> b n f t", key_states, query_states)
|
225 |
+
else:
|
226 |
+
attention_scores = jnp.einsum("b i d, b j d->b i j", query_states, key_states)
|
227 |
+
|
228 |
+
attention_scores = attention_scores * self.scale
|
229 |
+
attention_probs = nn.softmax(attention_scores, axis=-1 if self.split_head_dim else 2)
|
230 |
+
|
231 |
+
# attend to values
|
232 |
+
if self.split_head_dim:
|
233 |
+
hidden_states = jnp.einsum("b n f t, b t n h -> b f n h", attention_probs, value_states)
|
234 |
+
b = hidden_states.shape[0]
|
235 |
+
hidden_states = jnp.reshape(hidden_states, (b, -1, self.heads * self.dim_head))
|
236 |
+
else:
|
237 |
+
hidden_states = jnp.einsum("b i j, b j d -> b i d", attention_probs, value_states)
|
238 |
+
hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
|
239 |
+
|
240 |
+
hidden_states = self.proj_attn(hidden_states)
|
241 |
+
return self.dropout_layer(hidden_states, deterministic=deterministic)
|
242 |
+
|
243 |
+
|
244 |
+
class FlaxBasicTransformerBlock(nn.Module):
|
245 |
+
r"""
|
246 |
+
A Flax transformer block layer with `GLU` (Gated Linear Unit) activation function as described in:
|
247 |
+
https://arxiv.org/abs/1706.03762
|
248 |
+
|
249 |
+
|
250 |
+
Parameters:
|
251 |
+
dim (:obj:`int`):
|
252 |
+
Inner hidden states dimension
|
253 |
+
n_heads (:obj:`int`):
|
254 |
+
Number of heads
|
255 |
+
d_head (:obj:`int`):
|
256 |
+
Hidden states dimension inside each head
|
257 |
+
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
258 |
+
Dropout rate
|
259 |
+
only_cross_attention (`bool`, defaults to `False`):
|
260 |
+
Whether to only apply cross attention.
|
261 |
+
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
262 |
+
Parameters `dtype`
|
263 |
+
use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
|
264 |
+
enable memory efficient attention https://arxiv.org/abs/2112.05682
|
265 |
+
split_head_dim (`bool`, *optional*, defaults to `False`):
|
266 |
+
Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
|
267 |
+
enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
|
268 |
+
"""
|
269 |
+
|
270 |
+
dim: int
|
271 |
+
n_heads: int
|
272 |
+
d_head: int
|
273 |
+
dropout: float = 0.0
|
274 |
+
only_cross_attention: bool = False
|
275 |
+
dtype: jnp.dtype = jnp.float32
|
276 |
+
use_memory_efficient_attention: bool = False
|
277 |
+
split_head_dim: bool = False
|
278 |
+
|
279 |
+
def setup(self):
|
280 |
+
# self attention (or cross_attention if only_cross_attention is True)
|
281 |
+
self.attn1 = FlaxAttention(
|
282 |
+
self.dim,
|
283 |
+
self.n_heads,
|
284 |
+
self.d_head,
|
285 |
+
self.dropout,
|
286 |
+
self.use_memory_efficient_attention,
|
287 |
+
self.split_head_dim,
|
288 |
+
dtype=self.dtype,
|
289 |
+
)
|
290 |
+
# cross attention
|
291 |
+
self.attn2 = FlaxAttention(
|
292 |
+
self.dim,
|
293 |
+
self.n_heads,
|
294 |
+
self.d_head,
|
295 |
+
self.dropout,
|
296 |
+
self.use_memory_efficient_attention,
|
297 |
+
self.split_head_dim,
|
298 |
+
dtype=self.dtype,
|
299 |
+
)
|
300 |
+
self.ff = FlaxFeedForward(dim=self.dim, dropout=self.dropout, dtype=self.dtype)
|
301 |
+
self.norm1 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
|
302 |
+
self.norm2 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
|
303 |
+
self.norm3 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype)
|
304 |
+
self.dropout_layer = nn.Dropout(rate=self.dropout)
|
305 |
+
|
306 |
+
def __call__(self, hidden_states, context, deterministic=True):
|
307 |
+
# self attention
|
308 |
+
residual = hidden_states
|
309 |
+
if self.only_cross_attention:
|
310 |
+
hidden_states = self.attn1(self.norm1(hidden_states), context, deterministic=deterministic)
|
311 |
+
else:
|
312 |
+
hidden_states = self.attn1(self.norm1(hidden_states), deterministic=deterministic)
|
313 |
+
hidden_states = hidden_states + residual
|
314 |
+
|
315 |
+
# cross attention
|
316 |
+
residual = hidden_states
|
317 |
+
hidden_states = self.attn2(self.norm2(hidden_states), context, deterministic=deterministic)
|
318 |
+
hidden_states = hidden_states + residual
|
319 |
+
|
320 |
+
# feed forward
|
321 |
+
residual = hidden_states
|
322 |
+
hidden_states = self.ff(self.norm3(hidden_states), deterministic=deterministic)
|
323 |
+
hidden_states = hidden_states + residual
|
324 |
+
|
325 |
+
return self.dropout_layer(hidden_states, deterministic=deterministic)
|
326 |
+
|
327 |
+
|
328 |
+
class FlaxTransformer2DModel(nn.Module):
|
329 |
+
r"""
|
330 |
+
A Spatial Transformer layer with Gated Linear Unit (GLU) activation function as described in:
|
331 |
+
https://arxiv.org/pdf/1506.02025.pdf
|
332 |
+
|
333 |
+
|
334 |
+
Parameters:
|
335 |
+
in_channels (:obj:`int`):
|
336 |
+
Input number of channels
|
337 |
+
n_heads (:obj:`int`):
|
338 |
+
Number of heads
|
339 |
+
d_head (:obj:`int`):
|
340 |
+
Hidden states dimension inside each head
|
341 |
+
depth (:obj:`int`, *optional*, defaults to 1):
|
342 |
+
Number of transformers block
|
343 |
+
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
344 |
+
Dropout rate
|
345 |
+
use_linear_projection (`bool`, defaults to `False`): tbd
|
346 |
+
only_cross_attention (`bool`, defaults to `False`): tbd
|
347 |
+
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
348 |
+
Parameters `dtype`
|
349 |
+
use_memory_efficient_attention (`bool`, *optional*, defaults to `False`):
|
350 |
+
enable memory efficient attention https://arxiv.org/abs/2112.05682
|
351 |
+
split_head_dim (`bool`, *optional*, defaults to `False`):
|
352 |
+
Whether to split the head dimension into a new axis for the self-attention computation. In most cases,
|
353 |
+
enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL.
|
354 |
+
"""
|
355 |
+
|
356 |
+
in_channels: int
|
357 |
+
n_heads: int
|
358 |
+
d_head: int
|
359 |
+
depth: int = 1
|
360 |
+
dropout: float = 0.0
|
361 |
+
use_linear_projection: bool = False
|
362 |
+
only_cross_attention: bool = False
|
363 |
+
dtype: jnp.dtype = jnp.float32
|
364 |
+
use_memory_efficient_attention: bool = False
|
365 |
+
split_head_dim: bool = False
|
366 |
+
|
367 |
+
def setup(self):
|
368 |
+
self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-5)
|
369 |
+
|
370 |
+
inner_dim = self.n_heads * self.d_head
|
371 |
+
if self.use_linear_projection:
|
372 |
+
self.proj_in = nn.Dense(inner_dim, dtype=self.dtype)
|
373 |
+
else:
|
374 |
+
self.proj_in = nn.Conv(
|
375 |
+
inner_dim,
|
376 |
+
kernel_size=(1, 1),
|
377 |
+
strides=(1, 1),
|
378 |
+
padding="VALID",
|
379 |
+
dtype=self.dtype,
|
380 |
+
)
|
381 |
+
|
382 |
+
self.transformer_blocks = [
|
383 |
+
FlaxBasicTransformerBlock(
|
384 |
+
inner_dim,
|
385 |
+
self.n_heads,
|
386 |
+
self.d_head,
|
387 |
+
dropout=self.dropout,
|
388 |
+
only_cross_attention=self.only_cross_attention,
|
389 |
+
dtype=self.dtype,
|
390 |
+
use_memory_efficient_attention=self.use_memory_efficient_attention,
|
391 |
+
split_head_dim=self.split_head_dim,
|
392 |
+
)
|
393 |
+
for _ in range(self.depth)
|
394 |
+
]
|
395 |
+
|
396 |
+
if self.use_linear_projection:
|
397 |
+
self.proj_out = nn.Dense(inner_dim, dtype=self.dtype)
|
398 |
+
else:
|
399 |
+
self.proj_out = nn.Conv(
|
400 |
+
inner_dim,
|
401 |
+
kernel_size=(1, 1),
|
402 |
+
strides=(1, 1),
|
403 |
+
padding="VALID",
|
404 |
+
dtype=self.dtype,
|
405 |
+
)
|
406 |
+
|
407 |
+
self.dropout_layer = nn.Dropout(rate=self.dropout)
|
408 |
+
|
409 |
+
def __call__(self, hidden_states, context, deterministic=True):
|
410 |
+
batch, height, width, channels = hidden_states.shape
|
411 |
+
residual = hidden_states
|
412 |
+
hidden_states = self.norm(hidden_states)
|
413 |
+
if self.use_linear_projection:
|
414 |
+
hidden_states = hidden_states.reshape(batch, height * width, channels)
|
415 |
+
hidden_states = self.proj_in(hidden_states)
|
416 |
+
else:
|
417 |
+
hidden_states = self.proj_in(hidden_states)
|
418 |
+
hidden_states = hidden_states.reshape(batch, height * width, channels)
|
419 |
+
|
420 |
+
for transformer_block in self.transformer_blocks:
|
421 |
+
hidden_states = transformer_block(hidden_states, context, deterministic=deterministic)
|
422 |
+
|
423 |
+
if self.use_linear_projection:
|
424 |
+
hidden_states = self.proj_out(hidden_states)
|
425 |
+
hidden_states = hidden_states.reshape(batch, height, width, channels)
|
426 |
+
else:
|
427 |
+
hidden_states = hidden_states.reshape(batch, height, width, channels)
|
428 |
+
hidden_states = self.proj_out(hidden_states)
|
429 |
+
|
430 |
+
hidden_states = hidden_states + residual
|
431 |
+
return self.dropout_layer(hidden_states, deterministic=deterministic)
|
432 |
+
|
433 |
+
|
434 |
+
class FlaxFeedForward(nn.Module):
|
435 |
+
r"""
|
436 |
+
Flax module that encapsulates two Linear layers separated by a non-linearity. It is the counterpart of PyTorch's
|
437 |
+
[`FeedForward`] class, with the following simplifications:
|
438 |
+
- The activation function is currently hardcoded to a gated linear unit from:
|
439 |
+
https://arxiv.org/abs/2002.05202
|
440 |
+
- `dim_out` is equal to `dim`.
|
441 |
+
- The number of hidden dimensions is hardcoded to `dim * 4` in [`FlaxGELU`].
|
442 |
+
|
443 |
+
Parameters:
|
444 |
+
dim (:obj:`int`):
|
445 |
+
Inner hidden states dimension
|
446 |
+
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
447 |
+
Dropout rate
|
448 |
+
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
449 |
+
Parameters `dtype`
|
450 |
+
"""
|
451 |
+
|
452 |
+
dim: int
|
453 |
+
dropout: float = 0.0
|
454 |
+
dtype: jnp.dtype = jnp.float32
|
455 |
+
|
456 |
+
def setup(self):
|
457 |
+
# The second linear layer needs to be called
|
458 |
+
# net_2 for now to match the index of the Sequential layer
|
459 |
+
self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype)
|
460 |
+
self.net_2 = nn.Dense(self.dim, dtype=self.dtype)
|
461 |
+
|
462 |
+
def __call__(self, hidden_states, deterministic=True):
|
463 |
+
hidden_states = self.net_0(hidden_states, deterministic=deterministic)
|
464 |
+
hidden_states = self.net_2(hidden_states)
|
465 |
+
return hidden_states
|
466 |
+
|
467 |
+
|
468 |
+
class FlaxGEGLU(nn.Module):
|
469 |
+
r"""
|
470 |
+
Flax implementation of a Linear layer followed by the variant of the gated linear unit activation function from
|
471 |
+
https://arxiv.org/abs/2002.05202.
|
472 |
+
|
473 |
+
Parameters:
|
474 |
+
dim (:obj:`int`):
|
475 |
+
Input hidden states dimension
|
476 |
+
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
477 |
+
Dropout rate
|
478 |
+
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
479 |
+
Parameters `dtype`
|
480 |
+
"""
|
481 |
+
|
482 |
+
dim: int
|
483 |
+
dropout: float = 0.0
|
484 |
+
dtype: jnp.dtype = jnp.float32
|
485 |
+
|
486 |
+
def setup(self):
|
487 |
+
inner_dim = self.dim * 4
|
488 |
+
self.proj = nn.Dense(inner_dim * 2, dtype=self.dtype)
|
489 |
+
self.dropout_layer = nn.Dropout(rate=self.dropout)
|
490 |
+
|
491 |
+
def __call__(self, hidden_states, deterministic=True):
|
492 |
+
hidden_states = self.proj(hidden_states)
|
493 |
+
hidden_linear, hidden_gelu = jnp.split(hidden_states, 2, axis=2)
|
494 |
+
return self.dropout_layer(hidden_linear * nn.gelu(hidden_gelu), deterministic=deterministic)
|
diffusers/models/attention_processor.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
diffusers/models/autoencoders/__init__.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
|
2 |
+
from .autoencoder_kl import AutoencoderKL
|
3 |
+
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
|
4 |
+
from .autoencoder_tiny import AutoencoderTiny
|
5 |
+
from .consistency_decoder_vae import ConsistencyDecoderVAE
|
diffusers/models/autoencoders/autoencoder_asym_kl.py
ADDED
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from typing import Optional, Tuple, Union
|
15 |
+
|
16 |
+
import torch
|
17 |
+
import torch.nn as nn
|
18 |
+
|
19 |
+
from ...configuration_utils import ConfigMixin, register_to_config
|
20 |
+
from ...utils.accelerate_utils import apply_forward_hook
|
21 |
+
from ..modeling_outputs import AutoencoderKLOutput
|
22 |
+
from ..modeling_utils import ModelMixin
|
23 |
+
from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder, MaskConditionDecoder
|
24 |
+
|
25 |
+
|
26 |
+
class AsymmetricAutoencoderKL(ModelMixin, ConfigMixin):
|
27 |
+
r"""
|
28 |
+
Designing a Better Asymmetric VQGAN for StableDiffusion https://arxiv.org/abs/2306.04632 . A VAE model with KL loss
|
29 |
+
for encoding images into latents and decoding latent representations into images.
|
30 |
+
|
31 |
+
This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
|
32 |
+
for all models (such as downloading or saving).
|
33 |
+
|
34 |
+
Parameters:
|
35 |
+
in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
|
36 |
+
out_channels (int, *optional*, defaults to 3): Number of channels in the output.
|
37 |
+
down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
|
38 |
+
Tuple of downsample block types.
|
39 |
+
down_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
|
40 |
+
Tuple of down block output channels.
|
41 |
+
layers_per_down_block (`int`, *optional*, defaults to `1`):
|
42 |
+
Number layers for down block.
|
43 |
+
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
|
44 |
+
Tuple of upsample block types.
|
45 |
+
up_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
|
46 |
+
Tuple of up block output channels.
|
47 |
+
layers_per_up_block (`int`, *optional*, defaults to `1`):
|
48 |
+
Number layers for up block.
|
49 |
+
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
|
50 |
+
latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
|
51 |
+
sample_size (`int`, *optional*, defaults to `32`): Sample input size.
|
52 |
+
norm_num_groups (`int`, *optional*, defaults to `32`):
|
53 |
+
Number of groups to use for the first normalization layer in ResNet blocks.
|
54 |
+
scaling_factor (`float`, *optional*, defaults to 0.18215):
|
55 |
+
The component-wise standard deviation of the trained latent space computed using the first batch of the
|
56 |
+
training set. This is used to scale the latent space to have unit variance when training the diffusion
|
57 |
+
model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
|
58 |
+
diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
|
59 |
+
/ scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
|
60 |
+
Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
|
61 |
+
"""
|
62 |
+
|
63 |
+
@register_to_config
|
64 |
+
def __init__(
|
65 |
+
self,
|
66 |
+
in_channels: int = 3,
|
67 |
+
out_channels: int = 3,
|
68 |
+
down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
|
69 |
+
down_block_out_channels: Tuple[int, ...] = (64,),
|
70 |
+
layers_per_down_block: int = 1,
|
71 |
+
up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
|
72 |
+
up_block_out_channels: Tuple[int, ...] = (64,),
|
73 |
+
layers_per_up_block: int = 1,
|
74 |
+
act_fn: str = "silu",
|
75 |
+
latent_channels: int = 4,
|
76 |
+
norm_num_groups: int = 32,
|
77 |
+
sample_size: int = 32,
|
78 |
+
scaling_factor: float = 0.18215,
|
79 |
+
) -> None:
|
80 |
+
super().__init__()
|
81 |
+
|
82 |
+
# pass init params to Encoder
|
83 |
+
self.encoder = Encoder(
|
84 |
+
in_channels=in_channels,
|
85 |
+
out_channels=latent_channels,
|
86 |
+
down_block_types=down_block_types,
|
87 |
+
block_out_channels=down_block_out_channels,
|
88 |
+
layers_per_block=layers_per_down_block,
|
89 |
+
act_fn=act_fn,
|
90 |
+
norm_num_groups=norm_num_groups,
|
91 |
+
double_z=True,
|
92 |
+
)
|
93 |
+
|
94 |
+
# pass init params to Decoder
|
95 |
+
self.decoder = MaskConditionDecoder(
|
96 |
+
in_channels=latent_channels,
|
97 |
+
out_channels=out_channels,
|
98 |
+
up_block_types=up_block_types,
|
99 |
+
block_out_channels=up_block_out_channels,
|
100 |
+
layers_per_block=layers_per_up_block,
|
101 |
+
act_fn=act_fn,
|
102 |
+
norm_num_groups=norm_num_groups,
|
103 |
+
)
|
104 |
+
|
105 |
+
self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
|
106 |
+
self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
|
107 |
+
|
108 |
+
self.use_slicing = False
|
109 |
+
self.use_tiling = False
|
110 |
+
|
111 |
+
self.register_to_config(block_out_channels=up_block_out_channels)
|
112 |
+
self.register_to_config(force_upcast=False)
|
113 |
+
|
114 |
+
@apply_forward_hook
|
115 |
+
def encode(
|
116 |
+
self, x: torch.FloatTensor, return_dict: bool = True
|
117 |
+
) -> Union[AutoencoderKLOutput, Tuple[torch.FloatTensor]]:
|
118 |
+
h = self.encoder(x)
|
119 |
+
moments = self.quant_conv(h)
|
120 |
+
posterior = DiagonalGaussianDistribution(moments)
|
121 |
+
|
122 |
+
if not return_dict:
|
123 |
+
return (posterior,)
|
124 |
+
|
125 |
+
return AutoencoderKLOutput(latent_dist=posterior)
|
126 |
+
|
127 |
+
def _decode(
|
128 |
+
self,
|
129 |
+
z: torch.FloatTensor,
|
130 |
+
image: Optional[torch.FloatTensor] = None,
|
131 |
+
mask: Optional[torch.FloatTensor] = None,
|
132 |
+
return_dict: bool = True,
|
133 |
+
) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
|
134 |
+
z = self.post_quant_conv(z)
|
135 |
+
dec = self.decoder(z, image, mask)
|
136 |
+
|
137 |
+
if not return_dict:
|
138 |
+
return (dec,)
|
139 |
+
|
140 |
+
return DecoderOutput(sample=dec)
|
141 |
+
|
142 |
+
@apply_forward_hook
|
143 |
+
def decode(
|
144 |
+
self,
|
145 |
+
z: torch.FloatTensor,
|
146 |
+
generator: Optional[torch.Generator] = None,
|
147 |
+
image: Optional[torch.FloatTensor] = None,
|
148 |
+
mask: Optional[torch.FloatTensor] = None,
|
149 |
+
return_dict: bool = True,
|
150 |
+
) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
|
151 |
+
decoded = self._decode(z, image, mask).sample
|
152 |
+
|
153 |
+
if not return_dict:
|
154 |
+
return (decoded,)
|
155 |
+
|
156 |
+
return DecoderOutput(sample=decoded)
|
157 |
+
|
158 |
+
def forward(
|
159 |
+
self,
|
160 |
+
sample: torch.FloatTensor,
|
161 |
+
mask: Optional[torch.FloatTensor] = None,
|
162 |
+
sample_posterior: bool = False,
|
163 |
+
return_dict: bool = True,
|
164 |
+
generator: Optional[torch.Generator] = None,
|
165 |
+
) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
|
166 |
+
r"""
|
167 |
+
Args:
|
168 |
+
sample (`torch.FloatTensor`): Input sample.
|
169 |
+
mask (`torch.FloatTensor`, *optional*, defaults to `None`): Optional inpainting mask.
|
170 |
+
sample_posterior (`bool`, *optional*, defaults to `False`):
|
171 |
+
Whether to sample from the posterior.
|
172 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
173 |
+
Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
|
174 |
+
"""
|
175 |
+
x = sample
|
176 |
+
posterior = self.encode(x).latent_dist
|
177 |
+
if sample_posterior:
|
178 |
+
z = posterior.sample(generator=generator)
|
179 |
+
else:
|
180 |
+
z = posterior.mode()
|
181 |
+
dec = self.decode(z, sample, mask).sample
|
182 |
+
|
183 |
+
if not return_dict:
|
184 |
+
return (dec,)
|
185 |
+
|
186 |
+
return DecoderOutput(sample=dec)
|
diffusers/models/autoencoders/autoencoder_kl.py
ADDED
@@ -0,0 +1,487 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from typing import Dict, Optional, Tuple, Union
|
15 |
+
|
16 |
+
import torch
|
17 |
+
import torch.nn as nn
|
18 |
+
|
19 |
+
from ...configuration_utils import ConfigMixin, register_to_config
|
20 |
+
from ...loaders import FromOriginalVAEMixin
|
21 |
+
from ...utils.accelerate_utils import apply_forward_hook
|
22 |
+
from ..attention_processor import (
|
23 |
+
ADDED_KV_ATTENTION_PROCESSORS,
|
24 |
+
CROSS_ATTENTION_PROCESSORS,
|
25 |
+
Attention,
|
26 |
+
AttentionProcessor,
|
27 |
+
AttnAddedKVProcessor,
|
28 |
+
AttnProcessor,
|
29 |
+
)
|
30 |
+
from ..modeling_outputs import AutoencoderKLOutput
|
31 |
+
from ..modeling_utils import ModelMixin
|
32 |
+
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
|
33 |
+
|
34 |
+
|
35 |
+
class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalVAEMixin):
|
36 |
+
r"""
|
37 |
+
A VAE model with KL loss for encoding images into latents and decoding latent representations into images.
|
38 |
+
|
39 |
+
This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
|
40 |
+
for all models (such as downloading or saving).
|
41 |
+
|
42 |
+
Parameters:
|
43 |
+
in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
|
44 |
+
out_channels (int, *optional*, defaults to 3): Number of channels in the output.
|
45 |
+
down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
|
46 |
+
Tuple of downsample block types.
|
47 |
+
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
|
48 |
+
Tuple of upsample block types.
|
49 |
+
block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
|
50 |
+
Tuple of block output channels.
|
51 |
+
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
|
52 |
+
latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
|
53 |
+
sample_size (`int`, *optional*, defaults to `32`): Sample input size.
|
54 |
+
scaling_factor (`float`, *optional*, defaults to 0.18215):
|
55 |
+
The component-wise standard deviation of the trained latent space computed using the first batch of the
|
56 |
+
training set. This is used to scale the latent space to have unit variance when training the diffusion
|
57 |
+
model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
|
58 |
+
diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
|
59 |
+
/ scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
|
60 |
+
Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
|
61 |
+
force_upcast (`bool`, *optional*, default to `True`):
|
62 |
+
If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
|
63 |
+
can be fine-tuned / trained to a lower range without loosing too much precision in which case
|
64 |
+
`force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
|
65 |
+
"""
|
66 |
+
|
67 |
+
_supports_gradient_checkpointing = True
|
68 |
+
|
69 |
+
@register_to_config
|
70 |
+
def __init__(
|
71 |
+
self,
|
72 |
+
in_channels: int = 3,
|
73 |
+
out_channels: int = 3,
|
74 |
+
down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
|
75 |
+
up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
|
76 |
+
block_out_channels: Tuple[int] = (64,),
|
77 |
+
layers_per_block: int = 1,
|
78 |
+
act_fn: str = "silu",
|
79 |
+
latent_channels: int = 4,
|
80 |
+
norm_num_groups: int = 32,
|
81 |
+
sample_size: int = 32,
|
82 |
+
scaling_factor: float = 0.18215,
|
83 |
+
force_upcast: float = True,
|
84 |
+
):
|
85 |
+
super().__init__()
|
86 |
+
|
87 |
+
# pass init params to Encoder
|
88 |
+
self.encoder = Encoder(
|
89 |
+
in_channels=in_channels,
|
90 |
+
out_channels=latent_channels,
|
91 |
+
down_block_types=down_block_types,
|
92 |
+
block_out_channels=block_out_channels,
|
93 |
+
layers_per_block=layers_per_block,
|
94 |
+
act_fn=act_fn,
|
95 |
+
norm_num_groups=norm_num_groups,
|
96 |
+
double_z=True,
|
97 |
+
)
|
98 |
+
|
99 |
+
# pass init params to Decoder
|
100 |
+
self.decoder = Decoder(
|
101 |
+
in_channels=latent_channels,
|
102 |
+
out_channels=out_channels,
|
103 |
+
up_block_types=up_block_types,
|
104 |
+
block_out_channels=block_out_channels,
|
105 |
+
layers_per_block=layers_per_block,
|
106 |
+
norm_num_groups=norm_num_groups,
|
107 |
+
act_fn=act_fn,
|
108 |
+
)
|
109 |
+
|
110 |
+
self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
|
111 |
+
self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
|
112 |
+
|
113 |
+
self.use_slicing = False
|
114 |
+
self.use_tiling = False
|
115 |
+
|
116 |
+
# only relevant if vae tiling is enabled
|
117 |
+
self.tile_sample_min_size = self.config.sample_size
|
118 |
+
sample_size = (
|
119 |
+
self.config.sample_size[0]
|
120 |
+
if isinstance(self.config.sample_size, (list, tuple))
|
121 |
+
else self.config.sample_size
|
122 |
+
)
|
123 |
+
self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
|
124 |
+
self.tile_overlap_factor = 0.25
|
125 |
+
|
126 |
+
def _set_gradient_checkpointing(self, module, value=False):
|
127 |
+
if isinstance(module, (Encoder, Decoder)):
|
128 |
+
module.gradient_checkpointing = value
|
129 |
+
|
130 |
+
def enable_tiling(self, use_tiling: bool = True):
|
131 |
+
r"""
|
132 |
+
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
|
133 |
+
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
|
134 |
+
processing larger images.
|
135 |
+
"""
|
136 |
+
self.use_tiling = use_tiling
|
137 |
+
|
138 |
+
def disable_tiling(self):
|
139 |
+
r"""
|
140 |
+
Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
|
141 |
+
decoding in one step.
|
142 |
+
"""
|
143 |
+
self.enable_tiling(False)
|
144 |
+
|
145 |
+
def enable_slicing(self):
|
146 |
+
r"""
|
147 |
+
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
|
148 |
+
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
|
149 |
+
"""
|
150 |
+
self.use_slicing = True
|
151 |
+
|
152 |
+
def disable_slicing(self):
|
153 |
+
r"""
|
154 |
+
Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
|
155 |
+
decoding in one step.
|
156 |
+
"""
|
157 |
+
self.use_slicing = False
|
158 |
+
|
159 |
+
@property
|
160 |
+
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
|
161 |
+
def attn_processors(self) -> Dict[str, AttentionProcessor]:
|
162 |
+
r"""
|
163 |
+
Returns:
|
164 |
+
`dict` of attention processors: A dictionary containing all attention processors used in the model with
|
165 |
+
indexed by its weight name.
|
166 |
+
"""
|
167 |
+
# set recursively
|
168 |
+
processors = {}
|
169 |
+
|
170 |
+
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
|
171 |
+
if hasattr(module, "get_processor"):
|
172 |
+
processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
|
173 |
+
|
174 |
+
for sub_name, child in module.named_children():
|
175 |
+
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
|
176 |
+
|
177 |
+
return processors
|
178 |
+
|
179 |
+
for name, module in self.named_children():
|
180 |
+
fn_recursive_add_processors(name, module, processors)
|
181 |
+
|
182 |
+
return processors
|
183 |
+
|
184 |
+
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
|
185 |
+
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
|
186 |
+
r"""
|
187 |
+
Sets the attention processor to use to compute attention.
|
188 |
+
|
189 |
+
Parameters:
|
190 |
+
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
|
191 |
+
The instantiated processor class or a dictionary of processor classes that will be set as the processor
|
192 |
+
for **all** `Attention` layers.
|
193 |
+
|
194 |
+
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
|
195 |
+
processor. This is strongly recommended when setting trainable attention processors.
|
196 |
+
|
197 |
+
"""
|
198 |
+
count = len(self.attn_processors.keys())
|
199 |
+
|
200 |
+
if isinstance(processor, dict) and len(processor) != count:
|
201 |
+
raise ValueError(
|
202 |
+
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
|
203 |
+
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
|
204 |
+
)
|
205 |
+
|
206 |
+
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
|
207 |
+
if hasattr(module, "set_processor"):
|
208 |
+
if not isinstance(processor, dict):
|
209 |
+
module.set_processor(processor)
|
210 |
+
else:
|
211 |
+
module.set_processor(processor.pop(f"{name}.processor"))
|
212 |
+
|
213 |
+
for sub_name, child in module.named_children():
|
214 |
+
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
|
215 |
+
|
216 |
+
for name, module in self.named_children():
|
217 |
+
fn_recursive_attn_processor(name, module, processor)
|
218 |
+
|
219 |
+
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
|
220 |
+
def set_default_attn_processor(self):
|
221 |
+
"""
|
222 |
+
Disables custom attention processors and sets the default attention implementation.
|
223 |
+
"""
|
224 |
+
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
|
225 |
+
processor = AttnAddedKVProcessor()
|
226 |
+
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
|
227 |
+
processor = AttnProcessor()
|
228 |
+
else:
|
229 |
+
raise ValueError(
|
230 |
+
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
|
231 |
+
)
|
232 |
+
|
233 |
+
self.set_attn_processor(processor)
|
234 |
+
|
235 |
+
@apply_forward_hook
|
236 |
+
def encode(
|
237 |
+
self, x: torch.FloatTensor, return_dict: bool = True
|
238 |
+
) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
|
239 |
+
"""
|
240 |
+
Encode a batch of images into latents.
|
241 |
+
|
242 |
+
Args:
|
243 |
+
x (`torch.FloatTensor`): Input batch of images.
|
244 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
245 |
+
Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
|
246 |
+
|
247 |
+
Returns:
|
248 |
+
The latent representations of the encoded images. If `return_dict` is True, a
|
249 |
+
[`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
|
250 |
+
"""
|
251 |
+
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
|
252 |
+
return self.tiled_encode(x, return_dict=return_dict)
|
253 |
+
|
254 |
+
if self.use_slicing and x.shape[0] > 1:
|
255 |
+
encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
|
256 |
+
h = torch.cat(encoded_slices)
|
257 |
+
else:
|
258 |
+
h = self.encoder(x)
|
259 |
+
|
260 |
+
moments = self.quant_conv(h)
|
261 |
+
posterior = DiagonalGaussianDistribution(moments)
|
262 |
+
|
263 |
+
if not return_dict:
|
264 |
+
return (posterior,)
|
265 |
+
|
266 |
+
return AutoencoderKLOutput(latent_dist=posterior)
|
267 |
+
|
268 |
+
def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
|
269 |
+
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
|
270 |
+
return self.tiled_decode(z, return_dict=return_dict)
|
271 |
+
|
272 |
+
z = self.post_quant_conv(z)
|
273 |
+
dec = self.decoder(z)
|
274 |
+
|
275 |
+
if not return_dict:
|
276 |
+
return (dec,)
|
277 |
+
|
278 |
+
return DecoderOutput(sample=dec)
|
279 |
+
|
280 |
+
@apply_forward_hook
|
281 |
+
def decode(
|
282 |
+
self, z: torch.FloatTensor, return_dict: bool = True, generator=None
|
283 |
+
) -> Union[DecoderOutput, torch.FloatTensor]:
|
284 |
+
"""
|
285 |
+
Decode a batch of images.
|
286 |
+
|
287 |
+
Args:
|
288 |
+
z (`torch.FloatTensor`): Input batch of latent vectors.
|
289 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
290 |
+
Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
|
291 |
+
|
292 |
+
Returns:
|
293 |
+
[`~models.vae.DecoderOutput`] or `tuple`:
|
294 |
+
If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
|
295 |
+
returned.
|
296 |
+
|
297 |
+
"""
|
298 |
+
if self.use_slicing and z.shape[0] > 1:
|
299 |
+
decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
|
300 |
+
decoded = torch.cat(decoded_slices)
|
301 |
+
else:
|
302 |
+
decoded = self._decode(z).sample
|
303 |
+
|
304 |
+
if not return_dict:
|
305 |
+
return (decoded,)
|
306 |
+
|
307 |
+
return DecoderOutput(sample=decoded)
|
308 |
+
|
309 |
+
def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
|
310 |
+
blend_extent = min(a.shape[2], b.shape[2], blend_extent)
|
311 |
+
for y in range(blend_extent):
|
312 |
+
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is
        different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the
        tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
        output, but they should be much less noticeable.

        Args:
            x (`torch.FloatTensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
            [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`:
                If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain
                `tuple` is returned.
        """
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""
        Decode a batch of images using a tiled decoder.

        Args:
            z (`torch.FloatTensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
        """
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""
        Args:
            sample (`torch.FloatTensor`): Input sample.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
    def fuse_qkv_projections(self):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
        key, value) are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        self.original_attn_processors = None

        for _, attn_processor in self.attn_processors.items():
            if "Added" in str(attn_processor.__class__.__name__):
                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")

        self.original_attn_processors = self.attn_processors

        for module in self.modules():
            if isinstance(module, Attention):
                module.fuse_projections(fuse=True)

    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
    def unfuse_qkv_projections(self):
        """Disables the fused QKV projection if enabled.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>

        """
        if self.original_attn_processors is not None:
            self.set_attn_processor(self.original_attn_processors)
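Note: the tiled `encode`/`decode` path above is only taken when tiling has been switched on for the model. A minimal usage sketch (not part of this commit; the checkpoint name is an illustrative assumption):

```py
import torch
from diffusers import AutoencoderKL

# Illustrative checkpoint; any AutoencoderKL weights behave the same way.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda")
vae.enable_tiling()  # large inputs are now routed through tiled_encode / tiled_decode

with torch.no_grad():
    image = torch.randn(1, 3, 1024, 1024, dtype=torch.float16, device="cuda")
    latents = vae.encode(image).latent_dist.sample()
    reconstruction = vae.decode(latents).sample
```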
diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
ADDED
@@ -0,0 +1,400 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import FromOriginalVAEMixin
from ...utils import is_torch_version
from ...utils.accelerate_utils import apply_forward_hook
from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor
from ..modeling_outputs import AutoencoderKLOutput
from ..modeling_utils import ModelMixin
from ..unet_3d_blocks import MidBlockTemporalDecoder, UpBlockTemporalDecoder
from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder


class TemporalDecoder(nn.Module):
    def __init__(
        self,
        in_channels: int = 4,
        out_channels: int = 3,
        block_out_channels: Tuple[int] = (128, 256, 512, 512),
        layers_per_block: int = 2,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
        self.mid_block = MidBlockTemporalDecoder(
            num_layers=self.layers_per_block,
            in_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            attention_head_dim=block_out_channels[-1],
        )

        # up
        self.up_blocks = nn.ModuleList([])
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i in range(len(block_out_channels)):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1
            up_block = UpBlockTemporalDecoder(
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-6)

        self.conv_act = nn.SiLU()
        self.conv_out = torch.nn.Conv2d(
            in_channels=block_out_channels[0],
            out_channels=out_channels,
            kernel_size=3,
            padding=1,
        )

        conv_out_kernel_size = (3, 1, 1)
        padding = [int(k // 2) for k in conv_out_kernel_size]
        self.time_conv_out = torch.nn.Conv3d(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=conv_out_kernel_size,
            padding=padding,
        )

        self.gradient_checkpointing = False

    def forward(
        self,
        sample: torch.FloatTensor,
        image_only_indicator: torch.FloatTensor,
        num_frames: int = 1,
    ) -> torch.FloatTensor:
        r"""The forward method of the `Decoder` class."""

        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block),
                    sample,
                    image_only_indicator,
                    use_reentrant=False,
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block),
                        sample,
                        image_only_indicator,
                        use_reentrant=False,
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block),
                    sample,
                    image_only_indicator,
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block),
                        sample,
                        image_only_indicator,
                    )
        else:
            # middle
            sample = self.mid_block(sample, image_only_indicator=image_only_indicator)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, image_only_indicator=image_only_indicator)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        batch_frames, channels, height, width = sample.shape
        batch_size = batch_frames // num_frames
        sample = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)
        sample = self.time_conv_out(sample)

        sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width)

        return sample


class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin, FromOriginalVAEMixin):
    r"""
    A VAE model with KL loss for encoding images into latents and decoding latent representations into images.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (int, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        layers_per_block: (`int`, *optional*, defaults to 1): Number of layers per block.
        latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
        sample_size (`int`, *optional*, defaults to `32`): Sample input size.
        scaling_factor (`float`, *optional*, defaults to 0.18215):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
        force_upcast (`bool`, *optional*, default to `True`):
            If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
            can be fine-tuned / trained to a lower range without loosing too much precision in which case
            `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        latent_channels: int = 4,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
        force_upcast: float = True,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = TemporalDecoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)

        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, TemporalDecoder)):
            module.gradient_checkpointing = value

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model with
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor)

    @apply_forward_hook
    def encode(
        self, x: torch.FloatTensor, return_dict: bool = True
    ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
        """
        Encode a batch of images into latents.

        Args:
            x (`torch.FloatTensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
                The latent representations of the encoded images. If `return_dict` is True, a
                [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
        """
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    @apply_forward_hook
    def decode(
        self,
        z: torch.FloatTensor,
        num_frames: int,
        return_dict: bool = True,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """
        Decode a batch of images.

        Args:
            z (`torch.FloatTensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.

        """
        batch_size = z.shape[0] // num_frames
        image_only_indicator = torch.zeros(batch_size, num_frames, dtype=z.dtype, device=z.device)
        decoded = self.decoder(z, num_frames=num_frames, image_only_indicator=image_only_indicator)

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
        num_frames: int = 1,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""
        Args:
            sample (`torch.FloatTensor`): Input sample.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()

        dec = self.decode(z, num_frames=num_frames).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
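Note: `AutoencoderKLTemporalDecoder.decode` expects the latents for all frames stacked along the batch dimension together with an explicit `num_frames`. A minimal sketch under that assumption (the checkpoint name and shapes are illustrative, not taken from this commit):

```py
import torch
from diffusers import AutoencoderKLTemporalDecoder

# Illustrative weights; the Stable Video Diffusion repo ships a compatible VAE in its "vae" subfolder.
vae = AutoencoderKLTemporalDecoder.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid", subfolder="vae", torch_dtype=torch.float16
).to("cuda")

num_frames = 8
# (batch * num_frames, latent_channels, h, w) latents, e.g. produced by a video UNet
latents = torch.randn(num_frames, 4, 64, 64, dtype=torch.float16, device="cuda")

with torch.no_grad():
    frames = vae.decode(latents / vae.config.scaling_factor, num_frames=num_frames).sample
```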
diffusers/models/autoencoders/autoencoder_tiny.py
ADDED
@@ -0,0 +1,345 @@
1 |
+
# Copyright 2023 Ollin Boer Bohan and The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
|
16 |
+
from dataclasses import dataclass
|
17 |
+
from typing import Optional, Tuple, Union
|
18 |
+
|
19 |
+
import torch
|
20 |
+
|
21 |
+
from ...configuration_utils import ConfigMixin, register_to_config
|
22 |
+
from ...utils import BaseOutput
|
23 |
+
from ...utils.accelerate_utils import apply_forward_hook
|
24 |
+
from ..modeling_utils import ModelMixin
|
25 |
+
from .vae import DecoderOutput, DecoderTiny, EncoderTiny
|
26 |
+
|
27 |
+
|
28 |
+
@dataclass
|
29 |
+
class AutoencoderTinyOutput(BaseOutput):
|
30 |
+
"""
|
31 |
+
Output of AutoencoderTiny encoding method.
|
32 |
+
|
33 |
+
Args:
|
34 |
+
latents (`torch.Tensor`): Encoded outputs of the `Encoder`.
|
35 |
+
|
36 |
+
"""
|
37 |
+
|
38 |
+
latents: torch.Tensor
|
39 |
+
|
40 |
+
|
41 |
+
class AutoencoderTiny(ModelMixin, ConfigMixin):
|
42 |
+
r"""
|
43 |
+
A tiny distilled VAE model for encoding images into latents and decoding latent representations into images.
|
44 |
+
|
45 |
+
[`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`.
|
46 |
+
|
47 |
+
This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for
|
48 |
+
all models (such as downloading or saving).
|
49 |
+
|
50 |
+
Parameters:
|
51 |
+
in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
|
52 |
+
out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
|
53 |
+
encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
|
54 |
+
Tuple of integers representing the number of output channels for each encoder block. The length of the
|
55 |
+
tuple should be equal to the number of encoder blocks.
|
56 |
+
decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
|
57 |
+
Tuple of integers representing the number of output channels for each decoder block. The length of the
|
58 |
+
tuple should be equal to the number of decoder blocks.
|
59 |
+
act_fn (`str`, *optional*, defaults to `"relu"`):
|
60 |
+
Activation function to be used throughout the model.
|
61 |
+
latent_channels (`int`, *optional*, defaults to 4):
|
62 |
+
Number of channels in the latent representation. The latent space acts as a compressed representation of
|
63 |
+
the input image.
|
64 |
+
upsampling_scaling_factor (`int`, *optional*, defaults to 2):
|
65 |
+
Scaling factor for upsampling in the decoder. It determines the size of the output image during the
|
66 |
+
upsampling process.
|
67 |
+
num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`):
|
68 |
+
Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The
|
69 |
+
length of the tuple should be equal to the number of stages in the encoder. Each stage has a different
|
70 |
+
number of encoder blocks.
|
71 |
+
num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`):
|
72 |
+
Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The
|
73 |
+
length of the tuple should be equal to the number of stages in the decoder. Each stage has a different
|
74 |
+
number of decoder blocks.
|
75 |
+
latent_magnitude (`float`, *optional*, defaults to 3.0):
|
76 |
+
Magnitude of the latent representation. This parameter scales the latent representation values to control
|
77 |
+
the extent of information preservation.
|
78 |
+
latent_shift (float, *optional*, defaults to 0.5):
|
79 |
+
Shift applied to the latent representation. This parameter controls the center of the latent space.
|
80 |
+
scaling_factor (`float`, *optional*, defaults to 1.0):
|
81 |
+
The component-wise standard deviation of the trained latent space computed using the first batch of the
|
82 |
+
training set. This is used to scale the latent space to have unit variance when training the diffusion
|
83 |
+
model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
|
84 |
+
diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
|
85 |
+
/ scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
|
86 |
+
Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. For this Autoencoder,
|
87 |
+
however, no such scaling factor was used, hence the value of 1.0 as the default.
|
88 |
+
force_upcast (`bool`, *optional*, default to `False`):
|
89 |
+
If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
|
90 |
+
can be fine-tuned / trained to a lower range without losing too much precision, in which case
|
91 |
+
`force_upcast` can be set to `False` (see this fp16-friendly
|
92 |
+
[AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
|
93 |
+
"""
|
94 |
+
|
95 |
+
_supports_gradient_checkpointing = True
|
96 |
+
|
97 |
+
@register_to_config
|
98 |
+
def __init__(
|
99 |
+
self,
|
100 |
+
in_channels: int = 3,
|
101 |
+
out_channels: int = 3,
|
102 |
+
encoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64),
|
103 |
+
decoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64),
|
104 |
+
act_fn: str = "relu",
|
105 |
+
latent_channels: int = 4,
|
106 |
+
upsampling_scaling_factor: int = 2,
|
107 |
+
num_encoder_blocks: Tuple[int, ...] = (1, 3, 3, 3),
|
108 |
+
num_decoder_blocks: Tuple[int, ...] = (3, 3, 3, 1),
|
109 |
+
latent_magnitude: int = 3,
|
110 |
+
latent_shift: float = 0.5,
|
111 |
+
force_upcast: bool = False,
|
112 |
+
scaling_factor: float = 1.0,
|
113 |
+
):
|
114 |
+
super().__init__()
|
115 |
+
|
116 |
+
if len(encoder_block_out_channels) != len(num_encoder_blocks):
|
117 |
+
raise ValueError("`encoder_block_out_channels` should have the same length as `num_encoder_blocks`.")
|
118 |
+
if len(decoder_block_out_channels) != len(num_decoder_blocks):
|
119 |
+
raise ValueError("`decoder_block_out_channels` should have the same length as `num_decoder_blocks`.")
|
120 |
+
|
121 |
+
self.encoder = EncoderTiny(
|
122 |
+
in_channels=in_channels,
|
123 |
+
out_channels=latent_channels,
|
124 |
+
num_blocks=num_encoder_blocks,
|
125 |
+
block_out_channels=encoder_block_out_channels,
|
126 |
+
act_fn=act_fn,
|
127 |
+
)
|
128 |
+
|
129 |
+
self.decoder = DecoderTiny(
|
130 |
+
in_channels=latent_channels,
|
131 |
+
out_channels=out_channels,
|
132 |
+
num_blocks=num_decoder_blocks,
|
133 |
+
block_out_channels=decoder_block_out_channels,
|
134 |
+
upsampling_scaling_factor=upsampling_scaling_factor,
|
135 |
+
act_fn=act_fn,
|
136 |
+
)
|
137 |
+
|
138 |
+
self.latent_magnitude = latent_magnitude
|
139 |
+
self.latent_shift = latent_shift
|
140 |
+
self.scaling_factor = scaling_factor
|
141 |
+
|
142 |
+
self.use_slicing = False
|
143 |
+
self.use_tiling = False
|
144 |
+
|
145 |
+
# only relevant if vae tiling is enabled
|
146 |
+
self.spatial_scale_factor = 2**out_channels
|
147 |
+
self.tile_overlap_factor = 0.125
|
148 |
+
self.tile_sample_min_size = 512
|
149 |
+
self.tile_latent_min_size = self.tile_sample_min_size // self.spatial_scale_factor
|
150 |
+
|
151 |
+
self.register_to_config(block_out_channels=decoder_block_out_channels)
|
152 |
+
self.register_to_config(force_upcast=False)
|
153 |
+
|
154 |
+
def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
|
155 |
+
if isinstance(module, (EncoderTiny, DecoderTiny)):
|
156 |
+
module.gradient_checkpointing = value
|
157 |
+
|
158 |
+
def scale_latents(self, x: torch.FloatTensor) -> torch.FloatTensor:
|
159 |
+
"""raw latents -> [0, 1]"""
|
160 |
+
return x.div(2 * self.latent_magnitude).add(self.latent_shift).clamp(0, 1)
|
161 |
+
|
162 |
+
def unscale_latents(self, x: torch.FloatTensor) -> torch.FloatTensor:
|
163 |
+
"""[0, 1] -> raw latents"""
|
164 |
+
return x.sub(self.latent_shift).mul(2 * self.latent_magnitude)
|
165 |
+
|
166 |
+
def enable_slicing(self) -> None:
|
167 |
+
r"""
|
168 |
+
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
|
169 |
+
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
|
170 |
+
"""
|
171 |
+
self.use_slicing = True
|
172 |
+
|
173 |
+
def disable_slicing(self) -> None:
|
174 |
+
r"""
|
175 |
+
Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
|
176 |
+
decoding in one step.
|
177 |
+
"""
|
178 |
+
self.use_slicing = False
|
179 |
+
|
180 |
+
def enable_tiling(self, use_tiling: bool = True) -> None:
|
181 |
+
r"""
|
182 |
+
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
|
183 |
+
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
|
184 |
+
processing larger images.
|
185 |
+
"""
|
186 |
+
self.use_tiling = use_tiling
|
187 |
+
|
188 |
+
def disable_tiling(self) -> None:
|
189 |
+
r"""
|
190 |
+
Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
|
191 |
+
decoding in one step.
|
192 |
+
"""
|
193 |
+
self.enable_tiling(False)
|
194 |
+
|
195 |
+
def _tiled_encode(self, x: torch.FloatTensor) -> torch.FloatTensor:
|
196 |
+
r"""Encode a batch of images using a tiled encoder.
|
197 |
+
|
198 |
+
When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
|
199 |
+
steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
|
200 |
+
tiles overlap and are blended together to form a smooth output.
|
201 |
+
|
202 |
+
Args:
|
203 |
+
x (`torch.FloatTensor`): Input batch of images.
|
204 |
+
|
205 |
+
Returns:
|
206 |
+
`torch.FloatTensor`: Encoded batch of images.
|
207 |
+
"""
|
208 |
+
# scale of encoder output relative to input
|
209 |
+
sf = self.spatial_scale_factor
|
210 |
+
tile_size = self.tile_sample_min_size
|
211 |
+
|
212 |
+
# number of pixels to blend and to traverse between tile
|
213 |
+
blend_size = int(tile_size * self.tile_overlap_factor)
|
214 |
+
traverse_size = tile_size - blend_size
|
215 |
+
|
216 |
+
# tiles index (up/left)
|
217 |
+
ti = range(0, x.shape[-2], traverse_size)
|
218 |
+
tj = range(0, x.shape[-1], traverse_size)
|
219 |
+
|
220 |
+
# mask for blending
|
221 |
+
blend_masks = torch.stack(
|
222 |
+
torch.meshgrid([torch.arange(tile_size / sf) / (blend_size / sf - 1)] * 2, indexing="ij")
|
223 |
+
)
|
224 |
+
blend_masks = blend_masks.clamp(0, 1).to(x.device)
|
225 |
+
|
226 |
+
# output array
|
227 |
+
out = torch.zeros(x.shape[0], 4, x.shape[-2] // sf, x.shape[-1] // sf, device=x.device)
|
228 |
+
for i in ti:
|
229 |
+
for j in tj:
|
230 |
+
tile_in = x[..., i : i + tile_size, j : j + tile_size]
|
231 |
+
# tile result
|
232 |
+
tile_out = out[..., i // sf : (i + tile_size) // sf, j // sf : (j + tile_size) // sf]
|
233 |
+
tile = self.encoder(tile_in)
|
234 |
+
h, w = tile.shape[-2], tile.shape[-1]
|
235 |
+
# blend tile result into output
|
236 |
+
blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
|
237 |
+
blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
|
238 |
+
blend_mask = blend_mask_i * blend_mask_j
|
239 |
+
tile, blend_mask = tile[..., :h, :w], blend_mask[..., :h, :w]
|
240 |
+
tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
|
241 |
+
return out
|
242 |
+
|
243 |
+
def _tiled_decode(self, x: torch.FloatTensor) -> torch.FloatTensor:
|
244 |
+
r"""Encode a batch of images using a tiled encoder.
|
245 |
+
|
246 |
+
When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
|
247 |
+
steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
|
248 |
+
tiles overlap and are blended together to form a smooth output.
|
249 |
+
|
250 |
+
Args:
|
251 |
+
x (`torch.FloatTensor`): Input batch of images.
|
252 |
+
|
253 |
+
Returns:
|
254 |
+
`torch.FloatTensor`: Encoded batch of images.
|
255 |
+
"""
|
256 |
+
# scale of decoder output relative to input
|
257 |
+
sf = self.spatial_scale_factor
|
258 |
+
tile_size = self.tile_latent_min_size
|
259 |
+
|
260 |
+
# number of pixels to blend and to traverse between tiles
|
261 |
+
blend_size = int(tile_size * self.tile_overlap_factor)
|
262 |
+
traverse_size = tile_size - blend_size
|
263 |
+
|
264 |
+
# tiles index (up/left)
|
265 |
+
ti = range(0, x.shape[-2], traverse_size)
|
266 |
+
tj = range(0, x.shape[-1], traverse_size)
|
267 |
+
|
268 |
+
# mask for blending
|
269 |
+
blend_masks = torch.stack(
|
270 |
+
torch.meshgrid([torch.arange(tile_size * sf) / (blend_size * sf - 1)] * 2, indexing="ij")
|
271 |
+
)
|
272 |
+
blend_masks = blend_masks.clamp(0, 1).to(x.device)
|
273 |
+
|
274 |
+
# output array
|
275 |
+
out = torch.zeros(x.shape[0], 3, x.shape[-2] * sf, x.shape[-1] * sf, device=x.device)
|
276 |
+
for i in ti:
|
277 |
+
for j in tj:
|
278 |
+
tile_in = x[..., i : i + tile_size, j : j + tile_size]
|
279 |
+
# tile result
|
280 |
+
tile_out = out[..., i * sf : (i + tile_size) * sf, j * sf : (j + tile_size) * sf]
|
281 |
+
tile = self.decoder(tile_in)
|
282 |
+
h, w = tile.shape[-2], tile.shape[-1]
|
283 |
+
# blend tile result into output
|
284 |
+
blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
|
285 |
+
blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
|
286 |
+
blend_mask = (blend_mask_i * blend_mask_j)[..., :h, :w]
|
287 |
+
tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
|
288 |
+
return out
|
289 |
+
|
290 |
+
@apply_forward_hook
|
291 |
+
def encode(
|
292 |
+
self, x: torch.FloatTensor, return_dict: bool = True
|
293 |
+
) -> Union[AutoencoderTinyOutput, Tuple[torch.FloatTensor]]:
|
294 |
+
if self.use_slicing and x.shape[0] > 1:
|
295 |
+
output = [self._tiled_encode(x_slice) if self.use_tiling else self.encoder(x) for x_slice in x.split(1)]
|
296 |
+
output = torch.cat(output)
|
297 |
+
else:
|
298 |
+
output = self._tiled_encode(x) if self.use_tiling else self.encoder(x)
|
299 |
+
|
300 |
+
if not return_dict:
|
301 |
+
return (output,)
|
302 |
+
|
303 |
+
return AutoencoderTinyOutput(latents=output)
|
304 |
+
|
305 |
+
@apply_forward_hook
|
306 |
+
def decode(
|
307 |
+
self, x: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True
|
308 |
+
) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
|
309 |
+
if self.use_slicing and x.shape[0] > 1:
|
310 |
+
output = [self._tiled_decode(x_slice) if self.use_tiling else self.decoder(x) for x_slice in x.split(1)]
|
311 |
+
output = torch.cat(output)
|
312 |
+
else:
|
313 |
+
output = self._tiled_decode(x) if self.use_tiling else self.decoder(x)
|
314 |
+
|
315 |
+
if not return_dict:
|
316 |
+
return (output,)
|
317 |
+
|
318 |
+
return DecoderOutput(sample=output)
|
319 |
+
|
320 |
+
def forward(
|
321 |
+
self,
|
322 |
+
sample: torch.FloatTensor,
|
323 |
+
return_dict: bool = True,
|
324 |
+
) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
|
325 |
+
r"""
|
326 |
+
Args:
|
327 |
+
sample (`torch.FloatTensor`): Input sample.
|
328 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
329 |
+
Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
|
330 |
+
"""
|
331 |
+
enc = self.encode(sample).latents
|
332 |
+
|
333 |
+
# scale latents to be in [0, 1], then quantize latents to a byte tensor,
|
334 |
+
# as if we were storing the latents in an RGBA uint8 image.
|
335 |
+
scaled_enc = self.scale_latents(enc).mul_(255).round_().byte()
|
336 |
+
|
337 |
+
# unquantize latents back into [0, 1], then unscale latents back to their original range,
|
338 |
+
# as if we were loading the latents from an RGBA uint8 image.
|
339 |
+
unscaled_enc = self.unscale_latents(scaled_enc / 255.0)
|
340 |
+
|
341 |
+
dec = self.decode(unscaled_enc)
|
342 |
+
|
343 |
+
if not return_dict:
|
344 |
+
return (dec,)
|
345 |
+
return DecoderOutput(sample=dec)
|
diffusers/models/autoencoders/consistency_decoder_vae.py
ADDED
@@ -0,0 +1,435 @@
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from dataclasses import dataclass
|
15 |
+
from typing import Dict, Optional, Tuple, Union
|
16 |
+
|
17 |
+
import torch
|
18 |
+
import torch.nn.functional as F
|
19 |
+
from torch import nn
|
20 |
+
|
21 |
+
from ...configuration_utils import ConfigMixin, register_to_config
|
22 |
+
from ...schedulers import ConsistencyDecoderScheduler
|
23 |
+
from ...utils import BaseOutput
|
24 |
+
from ...utils.accelerate_utils import apply_forward_hook
|
25 |
+
from ...utils.torch_utils import randn_tensor
|
26 |
+
from ..attention_processor import (
|
27 |
+
ADDED_KV_ATTENTION_PROCESSORS,
|
28 |
+
CROSS_ATTENTION_PROCESSORS,
|
29 |
+
AttentionProcessor,
|
30 |
+
AttnAddedKVProcessor,
|
31 |
+
AttnProcessor,
|
32 |
+
)
|
33 |
+
from ..modeling_utils import ModelMixin
|
34 |
+
from ..unet_2d import UNet2DModel
|
35 |
+
from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder
|
36 |
+
|
37 |
+
|
38 |
+
@dataclass
|
39 |
+
class ConsistencyDecoderVAEOutput(BaseOutput):
|
40 |
+
"""
|
41 |
+
Output of encoding method.
|
42 |
+
|
43 |
+
Args:
|
44 |
+
latent_dist (`DiagonalGaussianDistribution`):
|
45 |
+
Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
|
46 |
+
`DiagonalGaussianDistribution` allows for sampling latents from the distribution.
|
47 |
+
"""
|
48 |
+
|
49 |
+
latent_dist: "DiagonalGaussianDistribution"
|
50 |
+
|
51 |
+
|
52 |
+
class ConsistencyDecoderVAE(ModelMixin, ConfigMixin):
|
53 |
+
r"""
|
54 |
+
The consistency decoder used with DALL-E 3.
|
55 |
+
|
56 |
+
Examples:
|
57 |
+
```py
|
58 |
+
>>> import torch
|
59 |
+
>>> from diffusers import StableDiffusionPipeline, ConsistencyDecoderVAE
|
60 |
+
|
61 |
+
>>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
|
62 |
+
>>> pipe = StableDiffusionPipeline.from_pretrained(
|
63 |
+
... "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
|
64 |
+
... ).to("cuda")
|
65 |
+
|
66 |
+
>>> pipe("horse", generator=torch.manual_seed(0)).images
|
67 |
+
```
|
68 |
+
"""
|
69 |
+
|
70 |
+
@register_to_config
|
71 |
+
def __init__(
|
72 |
+
self,
|
73 |
+
scaling_factor: float = 0.18215,
|
74 |
+
latent_channels: int = 4,
|
75 |
+
encoder_act_fn: str = "silu",
|
76 |
+
encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
|
77 |
+
encoder_double_z: bool = True,
|
78 |
+
encoder_down_block_types: Tuple[str, ...] = (
|
79 |
+
"DownEncoderBlock2D",
|
80 |
+
"DownEncoderBlock2D",
|
81 |
+
"DownEncoderBlock2D",
|
82 |
+
"DownEncoderBlock2D",
|
83 |
+
),
|
84 |
+
encoder_in_channels: int = 3,
|
85 |
+
encoder_layers_per_block: int = 2,
|
86 |
+
encoder_norm_num_groups: int = 32,
|
87 |
+
encoder_out_channels: int = 4,
|
88 |
+
decoder_add_attention: bool = False,
|
89 |
+
decoder_block_out_channels: Tuple[int, ...] = (320, 640, 1024, 1024),
|
90 |
+
decoder_down_block_types: Tuple[str, ...] = (
|
91 |
+
"ResnetDownsampleBlock2D",
|
92 |
+
"ResnetDownsampleBlock2D",
|
93 |
+
"ResnetDownsampleBlock2D",
|
94 |
+
"ResnetDownsampleBlock2D",
|
95 |
+
),
|
96 |
+
decoder_downsample_padding: int = 1,
|
97 |
+
decoder_in_channels: int = 7,
|
98 |
+
decoder_layers_per_block: int = 3,
|
99 |
+
decoder_norm_eps: float = 1e-05,
|
100 |
+
decoder_norm_num_groups: int = 32,
|
101 |
+
decoder_num_train_timesteps: int = 1024,
|
102 |
+
decoder_out_channels: int = 6,
|
103 |
+
decoder_resnet_time_scale_shift: str = "scale_shift",
|
104 |
+
decoder_time_embedding_type: str = "learned",
|
105 |
+
decoder_up_block_types: Tuple[str, ...] = (
|
106 |
+
"ResnetUpsampleBlock2D",
|
107 |
+
"ResnetUpsampleBlock2D",
|
108 |
+
"ResnetUpsampleBlock2D",
|
109 |
+
"ResnetUpsampleBlock2D",
|
110 |
+
),
|
111 |
+
):
|
112 |
+
super().__init__()
|
113 |
+
self.encoder = Encoder(
|
114 |
+
act_fn=encoder_act_fn,
|
115 |
+
block_out_channels=encoder_block_out_channels,
|
116 |
+
double_z=encoder_double_z,
|
117 |
+
down_block_types=encoder_down_block_types,
|
118 |
+
in_channels=encoder_in_channels,
|
119 |
+
layers_per_block=encoder_layers_per_block,
|
120 |
+
norm_num_groups=encoder_norm_num_groups,
|
121 |
+
out_channels=encoder_out_channels,
|
122 |
+
)
|
123 |
+
|
124 |
+
self.decoder_unet = UNet2DModel(
|
125 |
+
add_attention=decoder_add_attention,
|
126 |
+
block_out_channels=decoder_block_out_channels,
|
127 |
+
down_block_types=decoder_down_block_types,
|
128 |
+
downsample_padding=decoder_downsample_padding,
|
129 |
+
in_channels=decoder_in_channels,
|
130 |
+
layers_per_block=decoder_layers_per_block,
|
131 |
+
norm_eps=decoder_norm_eps,
|
132 |
+
norm_num_groups=decoder_norm_num_groups,
|
133 |
+
num_train_timesteps=decoder_num_train_timesteps,
|
134 |
+
out_channels=decoder_out_channels,
|
135 |
+
resnet_time_scale_shift=decoder_resnet_time_scale_shift,
|
136 |
+
time_embedding_type=decoder_time_embedding_type,
|
137 |
+
up_block_types=decoder_up_block_types,
|
138 |
+
)
|
139 |
+
self.decoder_scheduler = ConsistencyDecoderScheduler()
|
140 |
+
self.register_to_config(block_out_channels=encoder_block_out_channels)
|
141 |
+
self.register_to_config(force_upcast=False)
|
142 |
+
self.register_buffer(
|
143 |
+
"means",
|
144 |
+
torch.tensor([0.38862467, 0.02253063, 0.07381133, -0.0171294])[None, :, None, None],
|
145 |
+
persistent=False,
|
146 |
+
)
|
147 |
+
self.register_buffer(
|
148 |
+
"stds", torch.tensor([0.9654121, 1.0440036, 0.76147926, 0.77022034])[None, :, None, None], persistent=False
|
149 |
+
)
|
150 |
+
|
151 |
+
self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
|
152 |
+
|
153 |
+
self.use_slicing = False
|
154 |
+
self.use_tiling = False
|
155 |
+
|
156 |
+
# Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_tiling
|
157 |
+
def enable_tiling(self, use_tiling: bool = True):
|
158 |
+
r"""
|
159 |
+
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
|
160 |
+
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
|
161 |
+
processing larger images.
|
162 |
+
"""
|
163 |
+
self.use_tiling = use_tiling
|
164 |
+
|
165 |
+
# Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_tiling
|
166 |
+
def disable_tiling(self):
|
167 |
+
r"""
|
168 |
+
Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
|
169 |
+
decoding in one step.
|
170 |
+
"""
|
171 |
+
self.enable_tiling(False)
|
172 |
+
|
173 |
+
# Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_slicing
|
174 |
+
def enable_slicing(self):
|
175 |
+
r"""
|
176 |
+
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
|
177 |
+
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
|
178 |
+
"""
|
179 |
+
self.use_slicing = True
|
180 |
+
|
181 |
+
# Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_slicing
|
182 |
+
def disable_slicing(self):
|
183 |
+
r"""
|
184 |
+
Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
|
185 |
+
decoding in one step.
|
186 |
+
"""
|
187 |
+
self.use_slicing = False
|
188 |
+
|
189 |
+
@property
|
190 |
+
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
|
191 |
+
def attn_processors(self) -> Dict[str, AttentionProcessor]:
|
192 |
+
r"""
|
193 |
+
Returns:
|
194 |
+
`dict` of attention processors: A dictionary containing all attention processors used in the model with
|
195 |
+
indexed by its weight name.
|
196 |
+
"""
|
197 |
+
# set recursively
|
198 |
+
processors = {}
|
199 |
+
|
200 |
+
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
|
201 |
+
if hasattr(module, "get_processor"):
|
202 |
+
processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
|
203 |
+
|
204 |
+
for sub_name, child in module.named_children():
|
205 |
+
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
|
206 |
+
|
207 |
+
return processors
|
208 |
+
|
209 |
+
for name, module in self.named_children():
|
210 |
+
fn_recursive_add_processors(name, module, processors)
|
211 |
+
|
212 |
+
return processors
|
213 |
+
|
214 |
+
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
|
215 |
+
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
|
216 |
+
r"""
|
217 |
+
Sets the attention processor to use to compute attention.
|
218 |
+
|
219 |
+
Parameters:
|
220 |
+
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
|
221 |
+
The instantiated processor class or a dictionary of processor classes that will be set as the processor
|
222 |
+
for **all** `Attention` layers.
|
223 |
+
|
224 |
+
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
|
225 |
+
processor. This is strongly recommended when setting trainable attention processors.
|
226 |
+
|
227 |
+
"""
|
228 |
+
count = len(self.attn_processors.keys())
|
229 |
+
|
230 |
+
if isinstance(processor, dict) and len(processor) != count:
|
231 |
+
raise ValueError(
|
232 |
+
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
|
233 |
+
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
|
234 |
+
)
|
235 |
+
|
236 |
+
        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnAddedKVProcessor()
        elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor)

    @apply_forward_hook
    def encode(
        self, x: torch.FloatTensor, return_dict: bool = True
    ) -> Union[ConsistencyDecoderVAEOutput, Tuple[DiagonalGaussianDistribution]]:
        """
        Encode a batch of images into latents.

        Args:
            x (`torch.FloatTensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of a
                plain tuple.

        Returns:
            The latent representations of the encoded images. If `return_dict` is True, a
            [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a plain `tuple`
            is returned.
        """
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return ConsistencyDecoderVAEOutput(latent_dist=posterior)

    @apply_forward_hook
    def decode(
        self,
        z: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
        num_inference_steps: int = 2,
    ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
        z = (z * self.config.scaling_factor - self.means) / self.stds

        scale_factor = 2 ** (len(self.config.block_out_channels) - 1)
        z = F.interpolate(z, mode="nearest", scale_factor=scale_factor)

        batch_size, _, height, width = z.shape

        self.decoder_scheduler.set_timesteps(num_inference_steps, device=self.device)

        x_t = self.decoder_scheduler.init_noise_sigma * randn_tensor(
            (batch_size, 3, height, width), generator=generator, dtype=z.dtype, device=z.device
        )

        for t in self.decoder_scheduler.timesteps:
            model_input = torch.concat([self.decoder_scheduler.scale_model_input(x_t, t), z], dim=1)
            model_output = self.decoder_unet(model_input, t).sample[:, :3, :, :]
            prev_sample = self.decoder_scheduler.step(model_output, t, x_t, generator).prev_sample
            x_t = prev_sample

        x_0 = x_t

        if not return_dict:
            return (x_0,)

        return DecoderOutput(sample=x_0)

    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_v
    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_h
    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> ConsistencyDecoderVAEOutput:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding
        is different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts,
        the tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in
        the output, but they should be much less noticeable.

        Args:
            x (`torch.FloatTensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of
                a plain tuple.

        Returns:
            [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] or `tuple`:
                If return_dict is True, a [`~models.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is
                returned, otherwise a plain `tuple` is returned.
        """
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return ConsistencyDecoderVAEOutput(latent_dist=posterior)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]:
        r"""
        Args:
            sample (`torch.FloatTensor`): Input sample.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
            generator (`torch.Generator`, *optional*, defaults to `None`):
                Generator to use for sampling.

        Returns:
            [`DecoderOutput`] or `tuple`:
                If return_dict is True, a [`DecoderOutput`] is returned, otherwise a plain `tuple` is returned.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z, generator=generator).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
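For orientation, a minimal sketch of how the class above is typically driven: encode an image to a latent posterior, then run the short consistency decoding loop shown in `decode`. The checkpoint id and the 256×256 input size are illustrative assumptions, not something this commit pins down.

import torch
from diffusers import ConsistencyDecoderVAE  # assumed import path; this repo vendors the same class under diffusers/

# hypothetical checkpoint id, used here only to illustrate the encode/decode contract
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float32)
vae.eval()

image = torch.randn(1, 3, 256, 256)               # fake input batch in [-1, 1]
with torch.no_grad():
    posterior = vae.encode(image).latent_dist      # DiagonalGaussianDistribution
    z = posterior.sample()                         # or posterior.mode() for a deterministic latent
    decoded = vae.decode(z, num_inference_steps=2).sample  # two consistency steps, as in the loop above

print(decoded.shape)  # expected: torch.Size([1, 3, 256, 256])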
diffusers/models/autoencoders/vae.py
ADDED
@@ -0,0 +1,983 @@
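The file below provides the encoder/decoder building blocks shared by the vendored autoencoders. As a quick orientation before the listing: with `double_z=True` the `Encoder` emits twice as many channels as the latent space, and `DiagonalGaussianDistribution` splits those "moments" into a mean and a log-variance. The tiny configuration in this sketch is an illustrative assumption, not a Stable Diffusion or SyncDreamer setting; `quant_conv` is a stand-in for the 1×1 convolution that `AutoencoderKL` normally applies.

import torch
from diffusers.models.autoencoders.vae import Encoder, DiagonalGaussianDistribution  # vendored path in this repo

enc = Encoder(
    in_channels=3,
    out_channels=4,                               # latent channels
    down_block_types=("DownEncoderBlock2D",) * 2,
    block_out_channels=(32, 64),
    layers_per_block=1,
    double_z=True,                                # -> 2 * 4 = 8 "moment" channels (mean + logvar)
)
quant_conv = torch.nn.Conv2d(8, 8, 1)             # stand-in for AutoencoderKL's quant_conv

x = torch.randn(1, 3, 64, 64)
moments = quant_conv(enc(x))                      # (1, 8, 32, 32): one downsample between the two blocks
posterior = DiagonalGaussianDistribution(moments)
z = posterior.sample()                            # (1, 4, 32, 32)
print(z.shape, posterior.kl().shape)              # kl() reduces over C, H, W -> shape (1,)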
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple

import numpy as np
import torch
import torch.nn as nn

from ...utils import BaseOutput, is_torch_version
from ...utils.torch_utils import randn_tensor
from ..activations import get_activation
from ..attention_processor import SpatialNorm
from ..unet_2d_blocks import (
    AutoencoderTinyBlock,
    UNetMidBlock2D,
    get_down_block,
    get_up_block,
)


@dataclass
class DecoderOutput(BaseOutput):
    r"""
    Output of decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            The decoded output sample from the last layer of the model.
    """

    sample: torch.FloatTensor


class Encoder(nn.Module):
    r"""
    The `Encoder` layer of a variational autoencoder that encodes its input into a latent representation.

    Args:
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_channels (`int`, *optional*, defaults to 3):
            The number of output channels.
        down_block_types (`Tuple[str, ...]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            The types of down blocks to use. See `~diffusers.models.unet_2d_blocks.get_down_block` for available
            options.
        block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
            The number of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups for normalization.
        act_fn (`str`, *optional*, defaults to `"silu"`):
            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
        double_z (`bool`, *optional*, defaults to `True`):
            Whether to double the number of output channels for the last block.
    """

    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 2,
        norm_num_groups: int = 32,
        act_fn: str = "silu",
        double_z: bool = True,
        mid_block_add_attention=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
            add_attention=mid_block_add_attention,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, sample: torch.FloatTensor) -> torch.FloatTensor:
        r"""The forward method of the `Encoder` class."""

        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    r"""
    The `Decoder` layer of a variational autoencoder that decodes its latent representation into an output sample.

    Args:
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_channels (`int`, *optional*, defaults to 3):
            The number of output channels.
        up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options.
        block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
            The number of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups for normalization.
        act_fn (`str`, *optional*, defaults to `"silu"`):
            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
        norm_type (`str`, *optional*, defaults to `"group"`):
            The normalization type to use. Can be either `"group"` or `"spatial"`.
    """

    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 2,
        norm_num_groups: int = 32,
        act_fn: str = "silu",
        norm_type: str = "group",  # group, spatial
        mid_block_add_attention=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
            add_attention=mid_block_add_attention,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(
        self,
        sample: torch.FloatTensor,
        latent_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        r"""The forward method of the `Decoder` class."""

        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block),
                    sample,
                    latent_embeds,
                    use_reentrant=False,
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block),
                        sample,
                        latent_embeds,
                        use_reentrant=False,
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class UpSample(nn.Module):
    r"""
    The `UpSample` layer of a variational autoencoder that upsamples its input.

    Args:
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_channels (`int`, *optional*, defaults to 3):
            The number of output channels.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
    ) -> None:
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1)

    def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
        r"""The forward method of the `UpSample` class."""
        x = torch.relu(x)
        x = self.deconv(x)
        return x


class MaskConditionEncoder(nn.Module):
    """
    used in AsymmetricAutoencoderKL
    """

    def __init__(
        self,
        in_ch: int,
        out_ch: int = 192,
        res_ch: int = 768,
        stride: int = 16,
    ) -> None:
        super().__init__()

        channels = []
        while stride > 1:
            stride = stride // 2
            in_ch_ = out_ch * 2
            if out_ch > res_ch:
                out_ch = res_ch
            if stride == 1:
                in_ch_ = res_ch
            channels.append((in_ch_, out_ch))
            out_ch *= 2

        out_channels = []
        for _in_ch, _out_ch in channels:
            out_channels.append(_out_ch)
        out_channels.append(channels[-1][0])

        layers = []
        in_ch_ = in_ch
        for l in range(len(out_channels)):
            out_ch_ = out_channels[l]
            if l == 0 or l == 1:
                layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=3, stride=1, padding=1))
            else:
                layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=4, stride=2, padding=1))
            in_ch_ = out_ch_

        self.layers = nn.Sequential(*layers)

    def forward(self, x: torch.FloatTensor, mask=None) -> torch.FloatTensor:
        r"""The forward method of the `MaskConditionEncoder` class."""
        out = {}
        for l in range(len(self.layers)):
            layer = self.layers[l]
            x = layer(x)
            out[str(tuple(x.shape))] = x
            x = torch.relu(x)
        return out


class MaskConditionDecoder(nn.Module):
    r"""The `MaskConditionDecoder` should be used in combination with [`AsymmetricAutoencoderKL`] to enhance the model's
    decoder with a conditioner on the mask and masked image.

    Args:
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_channels (`int`, *optional*, defaults to 3):
            The number of output channels.
        up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options.
        block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
            The number of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups for normalization.
        act_fn (`str`, *optional*, defaults to `"silu"`):
            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
        norm_type (`str`, *optional*, defaults to `"group"`):
            The normalization type to use. Can be either `"group"` or `"spatial"`.
    """

    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 2,
        norm_num_groups: int = 32,
        act_fn: str = "silu",
        norm_type: str = "group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # condition encoder
        self.condition_encoder = MaskConditionEncoder(
            in_ch=out_channels,
            out_ch=block_out_channels[0],
            res_ch=block_out_channels[-1],
        )

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(
        self,
        z: torch.FloatTensor,
        image: Optional[torch.FloatTensor] = None,
        mask: Optional[torch.FloatTensor] = None,
        latent_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        r"""The forward method of the `MaskConditionDecoder` class."""
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block),
                    sample,
                    latent_embeds,
                    use_reentrant=False,
                )
                sample = sample.to(upscale_dtype)

                # condition encoder
                if image is not None and mask is not None:
                    masked_image = (1 - mask) * image
                    im_x = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(self.condition_encoder),
                        masked_image,
                        mask,
                        use_reentrant=False,
                    )

                # up
                for up_block in self.up_blocks:
                    if image is not None and mask is not None:
                        sample_ = im_x[str(tuple(sample.shape))]
                        mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
                        sample = sample * mask_ + sample_ * (1 - mask_)
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block),
                        sample,
                        latent_embeds,
                        use_reentrant=False,
                    )
                if image is not None and mask is not None:
                    sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # condition encoder
                if image is not None and mask is not None:
                    masked_image = (1 - mask) * image
                    im_x = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(self.condition_encoder),
                        masked_image,
                        mask,
                    )

                # up
                for up_block in self.up_blocks:
                    if image is not None and mask is not None:
                        sample_ = im_x[str(tuple(sample.shape))]
                        mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
                        sample = sample * mask_ + sample_ * (1 - mask_)
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
                if image is not None and mask is not None:
                    sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # condition encoder
            if image is not None and mask is not None:
                masked_image = (1 - mask) * image
                im_x = self.condition_encoder(masked_image, mask)

            # up
            for up_block in self.up_blocks:
                if image is not None and mask is not None:
                    sample_ = im_x[str(tuple(sample.shape))]
                    mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode="nearest")
                    sample = sample * mask_ + sample_ * (1 - mask_)
                sample = up_block(sample, latent_embeds)
            if image is not None and mask is not None:
                sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix
    multiplications and allows for post-hoc remapping of indices.
    """

    # NOTE: due to a bug the beta term was applied to the wrong term. for
    # backwards compatibility we use the buggy version by default, but you can
    # specify legacy=False to fix it.
    def __init__(
        self,
        n_e: int,
        vq_embed_dim: int,
        beta: float,
        remap=None,
        unknown_index: str = "random",
        sane_index_shape: bool = False,
        legacy: bool = True,
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.used: torch.Tensor
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds: torch.LongTensor) -> torch.LongTensor:
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds: torch.LongTensor) -> torch.LongTensor:
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, Tuple]:
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q: torch.FloatTensor = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices: torch.LongTensor, shape: Tuple[int, ...]) -> torch.FloatTensor:
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q: torch.FloatTensor = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters: torch.Tensor, deterministic: bool = False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape,
            generator=generator,
            device=self.parameters.device,
            dtype=self.parameters.dtype,
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other: "DiagonalGaussianDistribution" = None) -> torch.Tensor:
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(
                    torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                    dim=[1, 2, 3],
                )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample: torch.Tensor, dims: Tuple[int, ...] = [1, 2, 3]) -> torch.Tensor:
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims,
        )

    def mode(self) -> torch.Tensor:
        return self.mean


class EncoderTiny(nn.Module):
    r"""
    The `EncoderTiny` layer is a simpler version of the `Encoder` layer.

    Args:
        in_channels (`int`):
            The number of input channels.
        out_channels (`int`):
            The number of output channels.
        num_blocks (`Tuple[int, ...]`):
            Each value of the tuple represents a Conv2d layer followed by `value` number of `AutoencoderTinyBlock`'s to
            use.
        block_out_channels (`Tuple[int, ...]`):
            The number of output channels for each block.
        act_fn (`str`):
            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_blocks: Tuple[int, ...],
        block_out_channels: Tuple[int, ...],
        act_fn: str,
    ):
        super().__init__()

        layers = []
        for i, num_block in enumerate(num_blocks):
            num_channels = block_out_channels[i]

            if i == 0:
                layers.append(nn.Conv2d(in_channels, num_channels, kernel_size=3, padding=1))
            else:
                layers.append(
                    nn.Conv2d(
                        num_channels,
                        num_channels,
                        kernel_size=3,
                        padding=1,
                        stride=2,
                        bias=False,
                    )
                )

            for _ in range(num_block):
                layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn))

        layers.append(nn.Conv2d(block_out_channels[-1], out_channels, kernel_size=3, padding=1))

        self.layers = nn.Sequential(*layers)
        self.gradient_checkpointing = False

    def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
        r"""The forward method of the `EncoderTiny` class."""
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False)
            else:
                x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x)

        else:
            # scale image from [-1, 1] to [0, 1] to match TAESD convention
            x = self.layers(x.add(1).div(2))

        return x


class DecoderTiny(nn.Module):
    r"""
    The `DecoderTiny` layer is a simpler version of the `Decoder` layer.

    Args:
        in_channels (`int`):
            The number of input channels.
        out_channels (`int`):
            The number of output channels.
        num_blocks (`Tuple[int, ...]`):
            Each value of the tuple represents a Conv2d layer followed by `value` number of `AutoencoderTinyBlock`'s to
            use.
        block_out_channels (`Tuple[int, ...]`):
            The number of output channels for each block.
        upsampling_scaling_factor (`int`):
            The scaling factor to use for upsampling.
        act_fn (`str`):
            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_blocks: Tuple[int, ...],
        block_out_channels: Tuple[int, ...],
        upsampling_scaling_factor: int,
        act_fn: str,
    ):
        super().__init__()

        layers = [
            nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=1),
            get_activation(act_fn),
        ]

        for i, num_block in enumerate(num_blocks):
            is_final_block = i == (len(num_blocks) - 1)
            num_channels = block_out_channels[i]

            for _ in range(num_block):
                layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn))

            if not is_final_block:
                layers.append(nn.Upsample(scale_factor=upsampling_scaling_factor))

            conv_out_channel = num_channels if not is_final_block else out_channels
            layers.append(
                nn.Conv2d(
                    num_channels,
                    conv_out_channel,
                    kernel_size=3,
                    padding=1,
                    bias=is_final_block,
                )
            )

        self.layers = nn.Sequential(*layers)
        self.gradient_checkpointing = False

    def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
        r"""The forward method of the `DecoderTiny` class."""
        # Clamp.
        x = torch.tanh(x / 3) * 3

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False)
            else:
                x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x)

        else:
            x = self.layers(x)

        # scale image from [0, 1] to [-1, 1] to match diffusers convention
        return x.mul(2).sub(1)
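Worth calling out in the file above is the straight-through estimator in `VectorQuantizer.forward` (`z + (z_q - z).detach()`), which lets gradients bypass the non-differentiable nearest-neighbour codebook lookup. A small sketch with an illustrative 16-entry codebook (not a real VQ-GAN configuration):

import torch
from diffusers.models.autoencoders.vae import VectorQuantizer  # vendored path in this repo

vq = VectorQuantizer(n_e=16, vq_embed_dim=4, beta=0.25, legacy=False, sane_index_shape=True)

z = torch.randn(2, 4, 8, 8, requires_grad=True)    # encoder output: (B, C, H, W) with C == vq_embed_dim
z_q, loss, (_, _, indices) = vq(z)

print(z_q.shape)       # (2, 4, 8, 8): quantized latents; gradients flow straight through to z
print(indices.shape)   # (2, 8, 8) because sane_index_shape=True
loss.backward()        # commitment/codebook loss reaches both z and the embedding table
print(z.grad is not None)  # True: the (z_q - z).detach() trick keeps z in the autograd graph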
diffusers/models/controlnet.py
ADDED
@@ -0,0 +1,862 @@
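The listing below vendors the ControlNet model. The part most relevant to this Space is how a conditioning image reaches the UNet: `ControlNetConditioningEmbedding` downsamples the hint to the latent resolution before it is added to the output of the UNet copy's `conv_in`. A minimal shape sketch; the 320-channel value mirrors SD-style UNets and is an assumption here, not a value this commit fixes:

import torch
from diffusers.models.controlnet import ControlNetConditioningEmbedding  # vendored path in this repo

# default block_out_channels (16, 32, 96, 256) give three stride-2 convolutions: 512 -> 256 -> 128 -> 64
cond_embed = ControlNetConditioningEmbedding(
    conditioning_embedding_channels=320,   # assumed to match the first UNet block of an SD-style model
    conditioning_channels=3,
)

hint = torch.rand(1, 3, 512, 512)          # e.g. an edge map or depth map scaled to [0, 1]
features = cond_embed(hint)
print(features.shape)                      # torch.Size([1, 320, 64, 64]); zero-initialized conv_out at start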
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from dataclasses import dataclass
|
15 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
16 |
+
|
17 |
+
import torch
|
18 |
+
from torch import nn
|
19 |
+
from torch.nn import functional as F
|
20 |
+
|
21 |
+
from ..configuration_utils import ConfigMixin, register_to_config
|
22 |
+
from ..loaders import FromOriginalControlnetMixin
|
23 |
+
from ..utils import BaseOutput, logging
|
24 |
+
from .attention_processor import (
|
25 |
+
ADDED_KV_ATTENTION_PROCESSORS,
|
26 |
+
CROSS_ATTENTION_PROCESSORS,
|
27 |
+
AttentionProcessor,
|
28 |
+
AttnAddedKVProcessor,
|
29 |
+
AttnProcessor,
|
30 |
+
)
|
31 |
+
from .embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps
|
32 |
+
from .modeling_utils import ModelMixin
|
33 |
+
from .unet_2d_blocks import CrossAttnDownBlock2D, DownBlock2D, UNetMidBlock2D, UNetMidBlock2DCrossAttn, get_down_block
|
34 |
+
from .unet_2d_condition import UNet2DConditionModel
|
35 |
+
|
36 |
+
|
37 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
38 |
+
|
39 |
+
|
40 |
+
@dataclass
|
41 |
+
class ControlNetOutput(BaseOutput):
|
42 |
+
"""
|
43 |
+
The output of [`ControlNetModel`].
|
44 |
+
|
45 |
+
Args:
|
46 |
+
down_block_res_samples (`tuple[torch.Tensor]`):
|
47 |
+
A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
|
48 |
+
be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be
|
49 |
+
used to condition the original UNet's downsampling activations.
|
50 |
+
mid_down_block_re_sample (`torch.Tensor`):
|
51 |
+
The activation of the midde block (the lowest sample resolution). Each tensor should be of shape
|
52 |
+
`(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
|
53 |
+
Output can be used to condition the original UNet's middle block activation.
|
54 |
+
"""
|
55 |
+
|
56 |
+
down_block_res_samples: Tuple[torch.Tensor]
|
57 |
+
mid_block_res_sample: torch.Tensor
|
58 |
+
|
59 |
+
|
60 |
+
class ControlNetConditioningEmbedding(nn.Module):
|
61 |
+
"""
|
62 |
+
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
|
63 |
+
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
|
64 |
+
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
|
65 |
+
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
|
66 |
+
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
|
67 |
+
model) to encode image-space conditions ... into feature maps ..."
|
68 |
+
"""
|
69 |
+
|
70 |
+
def __init__(
|
71 |
+
self,
|
72 |
+
conditioning_embedding_channels: int,
|
73 |
+
conditioning_channels: int = 3,
|
74 |
+
block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
|
75 |
+
):
|
76 |
+
super().__init__()
|
77 |
+
|
78 |
+
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
|
79 |
+
|
80 |
+
self.blocks = nn.ModuleList([])
|
81 |
+
|
82 |
+
for i in range(len(block_out_channels) - 1):
|
83 |
+
channel_in = block_out_channels[i]
|
84 |
+
channel_out = block_out_channels[i + 1]
|
85 |
+
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
|
86 |
+
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
|
87 |
+
|
88 |
+
self.conv_out = zero_module(
|
89 |
+
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
|
90 |
+
)
|
91 |
+
|
92 |
+
def forward(self, conditioning):
|
93 |
+
embedding = self.conv_in(conditioning)
|
94 |
+
embedding = F.silu(embedding)
|
95 |
+
|
96 |
+
for block in self.blocks:
|
97 |
+
embedding = block(embedding)
|
98 |
+
embedding = F.silu(embedding)
|
99 |
+
|
100 |
+
embedding = self.conv_out(embedding)
|
101 |
+
|
102 |
+
return embedding
|
103 |
+
|
104 |
+
|
105 |
+
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
|
106 |
+
"""
|
107 |
+
A ControlNet model.
|
108 |
+
|
109 |
+
Args:
|
110 |
+
in_channels (`int`, defaults to 4):
|
111 |
+
The number of channels in the input sample.
|
112 |
+
flip_sin_to_cos (`bool`, defaults to `True`):
|
113 |
+
Whether to flip the sin to cos in the time embedding.
|
114 |
+
freq_shift (`int`, defaults to 0):
|
115 |
+
The frequency shift to apply to the time embedding.
|
116 |
+
down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
|
117 |
+
The tuple of downsample blocks to use.
|
118 |
+
only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
|
119 |
+
block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
|
120 |
+
The tuple of output channels for each block.
|
121 |
+
layers_per_block (`int`, defaults to 2):
|
122 |
+
The number of layers per block.
|
123 |
+
downsample_padding (`int`, defaults to 1):
|
124 |
+
The padding to use for the downsampling convolution.
|
125 |
+
mid_block_scale_factor (`float`, defaults to 1):
|
126 |
+
The scale factor to use for the mid block.
|
127 |
+
act_fn (`str`, defaults to "silu"):
|
128 |
+
The activation function to use.
|
129 |
+
norm_num_groups (`int`, *optional*, defaults to 32):
|
130 |
+
The number of groups to use for the normalization. If None, normalization and activation layers is skipped
|
131 |
+
in post-processing.
|
132 |
+
norm_eps (`float`, defaults to 1e-5):
|
133 |
+
The epsilon to use for the normalization.
|
134 |
+
cross_attention_dim (`int`, defaults to 1280):
|
135 |
+
The dimension of the cross attention features.
|
136 |
+
transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
|
137 |
+
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
|
138 |
+
[`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
|
139 |
+
[`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
|
140 |
+
encoder_hid_dim (`int`, *optional*, defaults to None):
|
141 |
+
If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
|
142 |
+
dimension to `cross_attention_dim`.
|
143 |
+
encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
|
144 |
+
If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
|
145 |
+
embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
|
146 |
+
attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
|
147 |
+
The dimension of the attention heads.
|
148 |
+
use_linear_projection (`bool`, defaults to `False`):
|
149 |
+
class_embed_type (`str`, *optional*, defaults to `None`):
|
150 |
+
The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
|
151 |
+
`"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
|
152 |
+
addition_embed_type (`str`, *optional*, defaults to `None`):
|
153 |
+
Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
|
154 |
+
"text". "text" will use the `TextTimeEmbedding` layer.
|
155 |
+
num_class_embeds (`int`, *optional*, defaults to 0):
|
156 |
+
Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
|
157 |
+
class conditioning with `class_embed_type` equal to `None`.
|
158 |
+
upcast_attention (`bool`, defaults to `False`):
|
159 |
+
resnet_time_scale_shift (`str`, defaults to `"default"`):
|
160 |
+
Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
|
161 |
+
projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
|
162 |
+
The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
|
163 |
+
`class_embed_type="projection"`.
|
164 |
+
controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
|
165 |
+
The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
|
166 |
+
conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`):
|
167 |
+
The tuple of output channel for each block in the `conditioning_embedding` layer.
|
168 |
+
global_pool_conditions (`bool`, defaults to `False`):
|
169 |
+
TODO(Patrick) - unused parameter.
|
170 |
+
addition_embed_type_num_heads (`int`, defaults to 64):
|
171 |
+
The number of heads to use for the `TextTimeEmbedding` layer.
|
172 |
+
"""
|
173 |
+
|
174 |
+
_supports_gradient_checkpointing = True
|
175 |
+
|
176 |
+
@register_to_config
|
177 |
+
def __init__(
|
178 |
+
self,
|
179 |
+
in_channels: int = 4,
|
180 |
+
conditioning_channels: int = 3,
|
181 |
+
flip_sin_to_cos: bool = True,
|
182 |
+
freq_shift: int = 0,
|
183 |
+
down_block_types: Tuple[str, ...] = (
|
184 |
+
"CrossAttnDownBlock2D",
|
185 |
+
"CrossAttnDownBlock2D",
|
186 |
+
"CrossAttnDownBlock2D",
|
187 |
+
"DownBlock2D",
|
188 |
+
),
|
189 |
+
mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
|
190 |
+
only_cross_attention: Union[bool, Tuple[bool]] = False,
|
191 |
+
block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
|
192 |
+
layers_per_block: int = 2,
|
193 |
+
downsample_padding: int = 1,
|
194 |
+
mid_block_scale_factor: float = 1,
|
195 |
+
act_fn: str = "silu",
|
196 |
+
norm_num_groups: Optional[int] = 32,
|
197 |
+
norm_eps: float = 1e-5,
|
198 |
+
cross_attention_dim: int = 1280,
|
199 |
+
transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
|
200 |
+
encoder_hid_dim: Optional[int] = None,
|
201 |
+
encoder_hid_dim_type: Optional[str] = None,
|
202 |
+
attention_head_dim: Union[int, Tuple[int, ...]] = 8,
|
203 |
+
num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
|
204 |
+
use_linear_projection: bool = False,
|
205 |
+
class_embed_type: Optional[str] = None,
|
206 |
+
addition_embed_type: Optional[str] = None,
|
207 |
+
addition_time_embed_dim: Optional[int] = None,
|
208 |
+
num_class_embeds: Optional[int] = None,
|
209 |
+
upcast_attention: bool = False,
|
210 |
+
resnet_time_scale_shift: str = "default",
|
211 |
+
projection_class_embeddings_input_dim: Optional[int] = None,
|
212 |
+
controlnet_conditioning_channel_order: str = "rgb",
|
213 |
+
conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
|
214 |
+
global_pool_conditions: bool = False,
|
215 |
+
addition_embed_type_num_heads: int = 64,
|
216 |
+
):
|
217 |
+
super().__init__()
|
218 |
+
|
219 |
+
# If `num_attention_heads` is not defined (which is the case for most models)
|
220 |
+
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
|
221 |
+
# The reason for this behavior is to correct for incorrectly named variables that were introduced
|
222 |
+
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
|
223 |
+
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
|
224 |
+
# which is why we correct for the naming here.
|
225 |
+
num_attention_heads = num_attention_heads or attention_head_dim
|
226 |
+
|
227 |
+
# Check inputs
|
228 |
+
if len(block_out_channels) != len(down_block_types):
|
229 |
+
raise ValueError(
|
230 |
+
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
|
231 |
+
)
|
232 |
+
|
233 |
+
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
|
234 |
+
raise ValueError(
|
235 |
+
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
|
236 |
+
)
|
237 |
+
|
238 |
+
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
|
239 |
+
raise ValueError(
|
240 |
+
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
|
241 |
+
)
|
242 |
+
|
243 |
+
if isinstance(transformer_layers_per_block, int):
|
244 |
+
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
|
245 |
+
|
246 |
+
# input
|
247 |
+
conv_in_kernel = 3
|
248 |
+
conv_in_padding = (conv_in_kernel - 1) // 2
|
249 |
+
self.conv_in = nn.Conv2d(
|
250 |
+
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
|
251 |
+
)
|
252 |
+
|
253 |
+
# time
|
254 |
+
time_embed_dim = block_out_channels[0] * 4
|
255 |
+
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
|
256 |
+
timestep_input_dim = block_out_channels[0]
|
257 |
+
self.time_embedding = TimestepEmbedding(
|
258 |
+
timestep_input_dim,
|
259 |
+
time_embed_dim,
|
260 |
+
act_fn=act_fn,
|
261 |
+
)
|
262 |
+
|
263 |
+
if encoder_hid_dim_type is None and encoder_hid_dim is not None:
|
264 |
+
encoder_hid_dim_type = "text_proj"
|
265 |
+
self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
|
266 |
+
logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
|
267 |
+
|
268 |
+
if encoder_hid_dim is None and encoder_hid_dim_type is not None:
|
269 |
+
raise ValueError(
|
270 |
+
f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
|
271 |
+
)
|
272 |
+
|
273 |
+
if encoder_hid_dim_type == "text_proj":
|
274 |
+
self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
|
275 |
+
elif encoder_hid_dim_type == "text_image_proj":
|
276 |
+
# image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
|
277 |
+
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
|
278 |
+
# case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)
|
279 |
+
self.encoder_hid_proj = TextImageProjection(
|
280 |
+
text_embed_dim=encoder_hid_dim,
|
281 |
+
image_embed_dim=cross_attention_dim,
|
282 |
+
cross_attention_dim=cross_attention_dim,
|
283 |
+
)
|
284 |
+
|
285 |
+
elif encoder_hid_dim_type is not None:
|
286 |
+
raise ValueError(
|
287 |
+
f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
|
288 |
+
)
|
289 |
+
else:
|
290 |
+
self.encoder_hid_proj = None
|
291 |
+
|
292 |
+
# class embedding
|
293 |
+
if class_embed_type is None and num_class_embeds is not None:
|
294 |
+
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
|
295 |
+
elif class_embed_type == "timestep":
|
296 |
+
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
|
297 |
+
elif class_embed_type == "identity":
|
298 |
+
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
|
299 |
+
elif class_embed_type == "projection":
|
300 |
+
if projection_class_embeddings_input_dim is None:
|
301 |
+
raise ValueError(
|
302 |
+
"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
|
303 |
+
)
|
304 |
+
# The projection `class_embed_type` is the same as the timestep `class_embed_type` except
|
305 |
+
# 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
|
306 |
+
# 2. it projects from an arbitrary input dimension.
|
307 |
+
#
|
308 |
+
# Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
|
309 |
+
# When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
|
310 |
+
# As a result, `TimestepEmbedding` can be passed arbitrary vectors.
|
311 |
+
self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
|
312 |
+
else:
|
313 |
+
self.class_embedding = None
|
314 |
+
|
315 |
+
if addition_embed_type == "text":
|
316 |
+
if encoder_hid_dim is not None:
|
317 |
+
text_time_embedding_from_dim = encoder_hid_dim
|
318 |
+
else:
|
319 |
+
text_time_embedding_from_dim = cross_attention_dim
|
320 |
+
|
321 |
+
self.add_embedding = TextTimeEmbedding(
|
322 |
+
text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
|
323 |
+
)
|
324 |
+
elif addition_embed_type == "text_image":
|
325 |
+
# text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
|
326 |
+
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
|
327 |
+
# case when `addition_embed_type == "text_image"` (Kandinsky 2.1)
|
328 |
+
self.add_embedding = TextImageTimeEmbedding(
|
329 |
+
text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
|
330 |
+
)
|
331 |
+
elif addition_embed_type == "text_time":
|
332 |
+
self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
|
333 |
+
self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
|
334 |
+
|
335 |
+
elif addition_embed_type is not None:
|
336 |
+
raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
|
337 |
+
|
338 |
+
# control net conditioning embedding
|
339 |
+
self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
|
340 |
+
conditioning_embedding_channels=block_out_channels[0],
|
341 |
+
block_out_channels=conditioning_embedding_out_channels,
|
342 |
+
conditioning_channels=conditioning_channels,
|
343 |
+
)
|
344 |
+
|
345 |
+
self.down_blocks = nn.ModuleList([])
|
346 |
+
self.controlnet_down_blocks = nn.ModuleList([])
|
347 |
+
|
348 |
+
if isinstance(only_cross_attention, bool):
|
349 |
+
only_cross_attention = [only_cross_attention] * len(down_block_types)
|
350 |
+
|
351 |
+
if isinstance(attention_head_dim, int):
|
352 |
+
attention_head_dim = (attention_head_dim,) * len(down_block_types)
|
353 |
+
|
354 |
+
if isinstance(num_attention_heads, int):
|
355 |
+
num_attention_heads = (num_attention_heads,) * len(down_block_types)
|
356 |
+
|
357 |
+
# down
|
358 |
+
output_channel = block_out_channels[0]
|
359 |
+
|
360 |
+
controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
|
361 |
+
controlnet_block = zero_module(controlnet_block)
|
362 |
+
self.controlnet_down_blocks.append(controlnet_block)
|
363 |
+
|
364 |
+
for i, down_block_type in enumerate(down_block_types):
|
365 |
+
input_channel = output_channel
|
366 |
+
output_channel = block_out_channels[i]
|
367 |
+
is_final_block = i == len(block_out_channels) - 1
|
368 |
+
|
369 |
+
down_block = get_down_block(
|
370 |
+
down_block_type,
|
371 |
+
num_layers=layers_per_block,
|
372 |
+
transformer_layers_per_block=transformer_layers_per_block[i],
|
373 |
+
in_channels=input_channel,
|
374 |
+
out_channels=output_channel,
|
375 |
+
temb_channels=time_embed_dim,
|
376 |
+
add_downsample=not is_final_block,
|
377 |
+
resnet_eps=norm_eps,
|
378 |
+
resnet_act_fn=act_fn,
|
379 |
+
resnet_groups=norm_num_groups,
|
380 |
+
cross_attention_dim=cross_attention_dim,
|
381 |
+
num_attention_heads=num_attention_heads[i],
|
382 |
+
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
|
383 |
+
downsample_padding=downsample_padding,
|
384 |
+
use_linear_projection=use_linear_projection,
|
385 |
+
only_cross_attention=only_cross_attention[i],
|
386 |
+
upcast_attention=upcast_attention,
|
387 |
+
resnet_time_scale_shift=resnet_time_scale_shift,
|
388 |
+
)
|
389 |
+
self.down_blocks.append(down_block)
|
390 |
+
|
391 |
+
for _ in range(layers_per_block):
|
392 |
+
controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
|
393 |
+
controlnet_block = zero_module(controlnet_block)
|
394 |
+
self.controlnet_down_blocks.append(controlnet_block)
|
395 |
+
|
396 |
+
if not is_final_block:
|
397 |
+
controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
|
398 |
+
controlnet_block = zero_module(controlnet_block)
|
399 |
+
self.controlnet_down_blocks.append(controlnet_block)
|
400 |
+
|
401 |
+
# mid
|
402 |
+
mid_block_channel = block_out_channels[-1]
|
403 |
+
|
404 |
+
controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)
|
405 |
+
controlnet_block = zero_module(controlnet_block)
|
406 |
+
self.controlnet_mid_block = controlnet_block
|
407 |
+
|
408 |
+
if mid_block_type == "UNetMidBlock2DCrossAttn":
|
409 |
+
self.mid_block = UNetMidBlock2DCrossAttn(
|
410 |
+
transformer_layers_per_block=transformer_layers_per_block[-1],
|
411 |
+
in_channels=mid_block_channel,
|
412 |
+
temb_channels=time_embed_dim,
|
413 |
+
resnet_eps=norm_eps,
|
414 |
+
resnet_act_fn=act_fn,
|
415 |
+
output_scale_factor=mid_block_scale_factor,
|
416 |
+
resnet_time_scale_shift=resnet_time_scale_shift,
|
417 |
+
cross_attention_dim=cross_attention_dim,
|
418 |
+
num_attention_heads=num_attention_heads[-1],
|
419 |
+
resnet_groups=norm_num_groups,
|
420 |
+
use_linear_projection=use_linear_projection,
|
421 |
+
upcast_attention=upcast_attention,
|
422 |
+
)
|
423 |
+
elif mid_block_type == "UNetMidBlock2D":
|
424 |
+
self.mid_block = UNetMidBlock2D(
|
425 |
+
in_channels=block_out_channels[-1],
|
426 |
+
temb_channels=time_embed_dim,
|
427 |
+
num_layers=0,
|
428 |
+
resnet_eps=norm_eps,
|
429 |
+
resnet_act_fn=act_fn,
|
430 |
+
output_scale_factor=mid_block_scale_factor,
|
431 |
+
resnet_groups=norm_num_groups,
|
432 |
+
resnet_time_scale_shift=resnet_time_scale_shift,
|
433 |
+
add_attention=False,
|
434 |
+
)
|
435 |
+
else:
|
436 |
+
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
|
437 |
+
|
438 |
+
@classmethod
|
439 |
+
def from_unet(
|
440 |
+
cls,
|
441 |
+
unet: UNet2DConditionModel,
|
442 |
+
controlnet_conditioning_channel_order: str = "rgb",
|
443 |
+
conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
|
444 |
+
load_weights_from_unet: bool = True,
|
445 |
+
conditioning_channels: int = 3,
|
446 |
+
):
|
447 |
+
r"""
|
448 |
+
Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`].
|
449 |
+
|
450 |
+
Parameters:
|
451 |
+
unet (`UNet2DConditionModel`):
|
452 |
+
The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied
|
453 |
+
where applicable.
|
454 |
+
"""
|
455 |
+
transformer_layers_per_block = (
|
456 |
+
unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1
|
457 |
+
)
|
458 |
+
encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None
|
459 |
+
encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None
|
460 |
+
addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None
|
461 |
+
addition_time_embed_dim = (
|
462 |
+
unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None
|
463 |
+
)
|
464 |
+
|
465 |
+
controlnet = cls(
|
466 |
+
encoder_hid_dim=encoder_hid_dim,
|
467 |
+
encoder_hid_dim_type=encoder_hid_dim_type,
|
468 |
+
addition_embed_type=addition_embed_type,
|
469 |
+
addition_time_embed_dim=addition_time_embed_dim,
|
470 |
+
transformer_layers_per_block=transformer_layers_per_block,
|
471 |
+
in_channels=unet.config.in_channels,
|
472 |
+
flip_sin_to_cos=unet.config.flip_sin_to_cos,
|
473 |
+
freq_shift=unet.config.freq_shift,
|
474 |
+
down_block_types=unet.config.down_block_types,
|
475 |
+
only_cross_attention=unet.config.only_cross_attention,
|
476 |
+
block_out_channels=unet.config.block_out_channels,
|
477 |
+
layers_per_block=unet.config.layers_per_block,
|
478 |
+
downsample_padding=unet.config.downsample_padding,
|
479 |
+
mid_block_scale_factor=unet.config.mid_block_scale_factor,
|
480 |
+
act_fn=unet.config.act_fn,
|
481 |
+
norm_num_groups=unet.config.norm_num_groups,
|
482 |
+
norm_eps=unet.config.norm_eps,
|
483 |
+
cross_attention_dim=unet.config.cross_attention_dim,
|
484 |
+
attention_head_dim=unet.config.attention_head_dim,
|
485 |
+
num_attention_heads=unet.config.num_attention_heads,
|
486 |
+
use_linear_projection=unet.config.use_linear_projection,
|
487 |
+
class_embed_type=unet.config.class_embed_type,
|
488 |
+
num_class_embeds=unet.config.num_class_embeds,
|
489 |
+
upcast_attention=unet.config.upcast_attention,
|
490 |
+
resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
|
491 |
+
projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
|
492 |
+
mid_block_type=unet.config.mid_block_type,
|
493 |
+
controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
|
494 |
+
conditioning_embedding_out_channels=conditioning_embedding_out_channels,
|
495 |
+
conditioning_channels=conditioning_channels,
|
496 |
+
)
|
497 |
+
|
498 |
+
if load_weights_from_unet:
|
499 |
+
controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
|
500 |
+
controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
|
501 |
+
controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())
|
502 |
+
|
503 |
+
if controlnet.class_embedding:
|
504 |
+
controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())
|
505 |
+
|
506 |
+
controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())
|
507 |
+
controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())
|
508 |
+
|
509 |
+
return controlnet
|
510 |
+
|
511 |
+
@property
|
512 |
+
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
|
513 |
+
def attn_processors(self) -> Dict[str, AttentionProcessor]:
|
514 |
+
r"""
|
515 |
+
Returns:
|
516 |
+
`dict` of attention processors: A dictionary containing all attention processors used in the model,
|
517 |
+
indexed by their weight names.
|
518 |
+
"""
|
519 |
+
# set recursively
|
520 |
+
processors = {}
|
521 |
+
|
522 |
+
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
|
523 |
+
if hasattr(module, "get_processor"):
|
524 |
+
processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
|
525 |
+
|
526 |
+
for sub_name, child in module.named_children():
|
527 |
+
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
|
528 |
+
|
529 |
+
return processors
|
530 |
+
|
531 |
+
for name, module in self.named_children():
|
532 |
+
fn_recursive_add_processors(name, module, processors)
|
533 |
+
|
534 |
+
return processors
|
535 |
+
|
536 |
+
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
|
537 |
+
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
|
538 |
+
r"""
|
539 |
+
Sets the attention processor to use to compute attention.
|
540 |
+
|
541 |
+
Parameters:
|
542 |
+
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
|
543 |
+
The instantiated processor class or a dictionary of processor classes that will be set as the processor
|
544 |
+
for **all** `Attention` layers.
|
545 |
+
|
546 |
+
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
|
547 |
+
processor. This is strongly recommended when setting trainable attention processors.
|
548 |
+
|
549 |
+
"""
|
550 |
+
count = len(self.attn_processors.keys())
|
551 |
+
|
552 |
+
if isinstance(processor, dict) and len(processor) != count:
|
553 |
+
raise ValueError(
|
554 |
+
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
|
555 |
+
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
|
556 |
+
)
|
557 |
+
|
558 |
+
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
|
559 |
+
if hasattr(module, "set_processor"):
|
560 |
+
if not isinstance(processor, dict):
|
561 |
+
module.set_processor(processor)
|
562 |
+
else:
|
563 |
+
module.set_processor(processor.pop(f"{name}.processor"))
|
564 |
+
|
565 |
+
for sub_name, child in module.named_children():
|
566 |
+
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
|
567 |
+
|
568 |
+
for name, module in self.named_children():
|
569 |
+
fn_recursive_attn_processor(name, module, processor)
|
570 |
+
|
571 |
+
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
|
572 |
+
def set_default_attn_processor(self):
|
573 |
+
"""
|
574 |
+
Disables custom attention processors and sets the default attention implementation.
|
575 |
+
"""
|
576 |
+
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
|
577 |
+
processor = AttnAddedKVProcessor()
|
578 |
+
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
|
579 |
+
processor = AttnProcessor()
|
580 |
+
else:
|
581 |
+
raise ValueError(
|
582 |
+
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
|
583 |
+
)
|
584 |
+
|
585 |
+
self.set_attn_processor(processor)
|
586 |
+
|
587 |
+
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice
|
588 |
+
def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None:
|
589 |
+
r"""
|
590 |
+
Enable sliced attention computation.
|
591 |
+
|
592 |
+
When this option is enabled, the attention module splits the input tensor in slices to compute attention in
|
593 |
+
several steps. This is useful for saving some memory in exchange for a small decrease in speed.
|
594 |
+
|
595 |
+
Args:
|
596 |
+
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
|
597 |
+
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
|
598 |
+
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
|
599 |
+
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
|
600 |
+
must be a multiple of `slice_size`.
|
601 |
+
"""
|
602 |
+
sliceable_head_dims = []
|
603 |
+
|
604 |
+
def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
|
605 |
+
if hasattr(module, "set_attention_slice"):
|
606 |
+
sliceable_head_dims.append(module.sliceable_head_dim)
|
607 |
+
|
608 |
+
for child in module.children():
|
609 |
+
fn_recursive_retrieve_sliceable_dims(child)
|
610 |
+
|
611 |
+
# retrieve number of attention layers
|
612 |
+
for module in self.children():
|
613 |
+
fn_recursive_retrieve_sliceable_dims(module)
|
614 |
+
|
615 |
+
num_sliceable_layers = len(sliceable_head_dims)
|
616 |
+
|
617 |
+
if slice_size == "auto":
|
618 |
+
# half the attention head size is usually a good trade-off between
|
619 |
+
# speed and memory
|
620 |
+
slice_size = [dim // 2 for dim in sliceable_head_dims]
|
621 |
+
elif slice_size == "max":
|
622 |
+
# make smallest slice possible
|
623 |
+
slice_size = num_sliceable_layers * [1]
|
624 |
+
|
625 |
+
slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
|
626 |
+
|
627 |
+
if len(slice_size) != len(sliceable_head_dims):
|
628 |
+
raise ValueError(
|
629 |
+
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
|
630 |
+
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
|
631 |
+
)
|
632 |
+
|
633 |
+
for i in range(len(slice_size)):
|
634 |
+
size = slice_size[i]
|
635 |
+
dim = sliceable_head_dims[i]
|
636 |
+
if size is not None and size > dim:
|
637 |
+
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
|
638 |
+
|
639 |
+
# Recursively walk through all the children.
|
640 |
+
# Any children which exposes the set_attention_slice method
|
641 |
+
# gets the message
|
642 |
+
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
|
643 |
+
if hasattr(module, "set_attention_slice"):
|
644 |
+
module.set_attention_slice(slice_size.pop())
|
645 |
+
|
646 |
+
for child in module.children():
|
647 |
+
fn_recursive_set_attention_slice(child, slice_size)
|
648 |
+
|
649 |
+
reversed_slice_size = list(reversed(slice_size))
|
650 |
+
for module in self.children():
|
651 |
+
fn_recursive_set_attention_slice(module, reversed_slice_size)
|
652 |
+
|
653 |
+
def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
|
654 |
+
if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):
|
655 |
+
module.gradient_checkpointing = value
|
656 |
+
|
657 |
+
def forward(
|
658 |
+
self,
|
659 |
+
sample: torch.FloatTensor,
|
660 |
+
timestep: Union[torch.Tensor, float, int],
|
661 |
+
encoder_hidden_states: torch.Tensor,
|
662 |
+
controlnet_cond: torch.FloatTensor,
|
663 |
+
conditioning_scale: float = 1.0,
|
664 |
+
class_labels: Optional[torch.Tensor] = None,
|
665 |
+
timestep_cond: Optional[torch.Tensor] = None,
|
666 |
+
attention_mask: Optional[torch.Tensor] = None,
|
667 |
+
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
|
668 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
669 |
+
guess_mode: bool = False,
|
670 |
+
return_dict: bool = True,
|
671 |
+
) -> Union[ControlNetOutput, Tuple[Tuple[torch.FloatTensor, ...], torch.FloatTensor]]:
|
672 |
+
"""
|
673 |
+
The [`ControlNetModel`] forward method.
|
674 |
+
|
675 |
+
Args:
|
676 |
+
sample (`torch.FloatTensor`):
|
677 |
+
The noisy input tensor.
|
678 |
+
timestep (`Union[torch.Tensor, float, int]`):
|
679 |
+
The number of timesteps to denoise an input.
|
680 |
+
encoder_hidden_states (`torch.Tensor`):
|
681 |
+
The encoder hidden states.
|
682 |
+
controlnet_cond (`torch.FloatTensor`):
|
683 |
+
The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
|
684 |
+
conditioning_scale (`float`, defaults to `1.0`):
|
685 |
+
The scale factor for ControlNet outputs.
|
686 |
+
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
|
687 |
+
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
|
688 |
+
timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
|
689 |
+
Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
|
690 |
+
timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
|
691 |
+
embeddings.
|
692 |
+
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
|
693 |
+
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
|
694 |
+
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
|
695 |
+
negative values to the attention scores corresponding to "discard" tokens.
|
696 |
+
added_cond_kwargs (`dict`):
|
697 |
+
Additional conditions for the Stable Diffusion XL UNet.
|
698 |
+
cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
|
699 |
+
A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
|
700 |
+
guess_mode (`bool`, defaults to `False`):
|
701 |
+
In this mode, the ControlNet encoder tries its best to recognize the content of the input image even if
|
702 |
+
you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
|
703 |
+
return_dict (`bool`, defaults to `True`):
|
704 |
+
Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
|
705 |
+
|
706 |
+
Returns:
|
707 |
+
[`~models.controlnet.ControlNetOutput`] **or** `tuple`:
|
708 |
+
If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is
|
709 |
+
returned where the first element is the sample tensor.
|
710 |
+
"""
|
711 |
+
# check channel order
|
712 |
+
channel_order = self.config.controlnet_conditioning_channel_order
|
713 |
+
|
714 |
+
if channel_order == "rgb":
|
715 |
+
# in rgb order by default
|
716 |
+
...
|
717 |
+
elif channel_order == "bgr":
|
718 |
+
controlnet_cond = torch.flip(controlnet_cond, dims=[1])
|
719 |
+
else:
|
720 |
+
raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
|
721 |
+
|
722 |
+
# prepare attention_mask
|
723 |
+
if attention_mask is not None:
|
724 |
+
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
|
725 |
+
attention_mask = attention_mask.unsqueeze(1)
|
726 |
+
|
727 |
+
# 1. time
|
728 |
+
timesteps = timestep
|
729 |
+
if not torch.is_tensor(timesteps):
|
730 |
+
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
|
731 |
+
# This would be a good case for the `match` statement (Python 3.10+)
|
732 |
+
is_mps = sample.device.type == "mps"
|
733 |
+
if isinstance(timestep, float):
|
734 |
+
dtype = torch.float32 if is_mps else torch.float64
|
735 |
+
else:
|
736 |
+
dtype = torch.int32 if is_mps else torch.int64
|
737 |
+
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
|
738 |
+
elif len(timesteps.shape) == 0:
|
739 |
+
timesteps = timesteps[None].to(sample.device)
|
740 |
+
|
741 |
+
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
|
742 |
+
timesteps = timesteps.expand(sample.shape[0])
|
743 |
+
|
744 |
+
t_emb = self.time_proj(timesteps)
|
745 |
+
|
746 |
+
# timesteps does not contain any weights and will always return f32 tensors
|
747 |
+
# but time_embedding might actually be running in fp16. so we need to cast here.
|
748 |
+
# there might be better ways to encapsulate this.
|
749 |
+
t_emb = t_emb.to(dtype=sample.dtype)
|
750 |
+
|
751 |
+
emb = self.time_embedding(t_emb, timestep_cond)
|
752 |
+
aug_emb = None
|
753 |
+
|
754 |
+
if self.class_embedding is not None:
|
755 |
+
if class_labels is None:
|
756 |
+
raise ValueError("class_labels should be provided when num_class_embeds > 0")
|
757 |
+
|
758 |
+
if self.config.class_embed_type == "timestep":
|
759 |
+
class_labels = self.time_proj(class_labels)
|
760 |
+
|
761 |
+
class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
|
762 |
+
emb = emb + class_emb
|
763 |
+
|
764 |
+
if self.config.addition_embed_type is not None:
|
765 |
+
if self.config.addition_embed_type == "text":
|
766 |
+
aug_emb = self.add_embedding(encoder_hidden_states)
|
767 |
+
|
768 |
+
elif self.config.addition_embed_type == "text_time":
|
769 |
+
if "text_embeds" not in added_cond_kwargs:
|
770 |
+
raise ValueError(
|
771 |
+
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
|
772 |
+
)
|
773 |
+
text_embeds = added_cond_kwargs.get("text_embeds")
|
774 |
+
if "time_ids" not in added_cond_kwargs:
|
775 |
+
raise ValueError(
|
776 |
+
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
|
777 |
+
)
|
778 |
+
time_ids = added_cond_kwargs.get("time_ids")
|
779 |
+
time_embeds = self.add_time_proj(time_ids.flatten())
|
780 |
+
time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
|
781 |
+
|
782 |
+
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
|
783 |
+
add_embeds = add_embeds.to(emb.dtype)
|
784 |
+
aug_emb = self.add_embedding(add_embeds)
|
785 |
+
|
786 |
+
emb = emb + aug_emb if aug_emb is not None else emb
|
787 |
+
|
788 |
+
# 2. pre-process
|
789 |
+
sample = self.conv_in(sample)
|
790 |
+
|
791 |
+
controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
|
792 |
+
sample = sample + controlnet_cond
|
793 |
+
|
794 |
+
# 3. down
|
795 |
+
down_block_res_samples = (sample,)
|
796 |
+
for downsample_block in self.down_blocks:
|
797 |
+
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
|
798 |
+
sample, res_samples = downsample_block(
|
799 |
+
hidden_states=sample,
|
800 |
+
temb=emb,
|
801 |
+
encoder_hidden_states=encoder_hidden_states,
|
802 |
+
attention_mask=attention_mask,
|
803 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
804 |
+
)
|
805 |
+
else:
|
806 |
+
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
|
807 |
+
|
808 |
+
down_block_res_samples += res_samples
|
809 |
+
|
810 |
+
# 4. mid
|
811 |
+
if self.mid_block is not None:
|
812 |
+
if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
|
813 |
+
sample = self.mid_block(
|
814 |
+
sample,
|
815 |
+
emb,
|
816 |
+
encoder_hidden_states=encoder_hidden_states,
|
817 |
+
attention_mask=attention_mask,
|
818 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
819 |
+
)
|
820 |
+
else:
|
821 |
+
sample = self.mid_block(sample, emb)
|
822 |
+
|
823 |
+
# 5. Control net blocks
|
824 |
+
|
825 |
+
controlnet_down_block_res_samples = ()
|
826 |
+
|
827 |
+
for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
|
828 |
+
down_block_res_sample = controlnet_block(down_block_res_sample)
|
829 |
+
controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
|
830 |
+
|
831 |
+
down_block_res_samples = controlnet_down_block_res_samples
|
832 |
+
|
833 |
+
mid_block_res_sample = self.controlnet_mid_block(sample)
|
834 |
+
|
835 |
+
# 6. scaling
|
836 |
+
if guess_mode and not self.config.global_pool_conditions:
|
837 |
+
scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0
|
838 |
+
scales = scales * conditioning_scale
|
839 |
+
down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
|
840 |
+
mid_block_res_sample = mid_block_res_sample * scales[-1] # last one
|
841 |
+
else:
|
842 |
+
down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
|
843 |
+
mid_block_res_sample = mid_block_res_sample * conditioning_scale
|
844 |
+
|
845 |
+
if self.config.global_pool_conditions:
|
846 |
+
down_block_res_samples = [
|
847 |
+
torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
|
848 |
+
]
|
849 |
+
mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True)
|
850 |
+
|
851 |
+
if not return_dict:
|
852 |
+
return (down_block_res_samples, mid_block_res_sample)
|
853 |
+
|
854 |
+
return ControlNetOutput(
|
855 |
+
down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
|
856 |
+
)
|
857 |
+
|
858 |
+
|
859 |
+
def zero_module(module):
|
860 |
+
for p in module.parameters():
|
861 |
+
nn.init.zeros_(p)
|
862 |
+
return module
|
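
For orientation, here is a minimal, hedged sketch of how the PyTorch ControlNetModel added above is typically wired up: a ControlNet is created from an existing UNet with `from_unet`, and its forward pass returns down-block and mid-block residuals that the UNet consumes during denoising. The checkpoint name, tensor shapes, and values below are illustrative assumptions, not part of this commit.

# Minimal sketch (assumption: a Stable Diffusion 1.5 UNet checkpoint is available locally
# or can be downloaded; shapes are illustrative). Not part of this commit.
import torch
from diffusers import UNet2DConditionModel, ControlNetModel

unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
controlnet = ControlNetModel.from_unet(unet)  # copies the matching UNet weights

latents = torch.randn(1, 4, 64, 64)        # noisy latent sample
timestep = torch.tensor([10])              # current denoising step
text_emb = torch.randn(1, 77, 768)         # encoder hidden states (SD 1.5 uses dim 768)
cond_image = torch.rand(1, 3, 512, 512)    # conditioning image, 8x the latent resolution

out = controlnet(
    latents,
    timestep,
    encoder_hidden_states=text_emb,
    controlnet_cond=cond_image,
    conditioning_scale=1.0,
)
# These residuals are added to the UNet's down/mid block outputs during denoising.
print(len(out.down_block_res_samples), out.mid_block_res_sample.shape)
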
diffusers/models/controlnet_flax.py
ADDED
@@ -0,0 +1,395 @@
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from typing import Optional, Tuple, Union
|
15 |
+
|
16 |
+
import flax
|
17 |
+
import flax.linen as nn
|
18 |
+
import jax
|
19 |
+
import jax.numpy as jnp
|
20 |
+
from flax.core.frozen_dict import FrozenDict
|
21 |
+
|
22 |
+
from ..configuration_utils import ConfigMixin, flax_register_to_config
|
23 |
+
from ..utils import BaseOutput
|
24 |
+
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
|
25 |
+
from .modeling_flax_utils import FlaxModelMixin
|
26 |
+
from .unet_2d_blocks_flax import (
|
27 |
+
FlaxCrossAttnDownBlock2D,
|
28 |
+
FlaxDownBlock2D,
|
29 |
+
FlaxUNetMidBlock2DCrossAttn,
|
30 |
+
)
|
31 |
+
|
32 |
+
|
33 |
+
@flax.struct.dataclass
|
34 |
+
class FlaxControlNetOutput(BaseOutput):
|
35 |
+
"""
|
36 |
+
The output of [`FlaxControlNetModel`].
|
37 |
+
|
38 |
+
Args:
|
39 |
+
down_block_res_samples (`jnp.ndarray`):
|
40 |
+
mid_block_res_sample (`jnp.ndarray`):
|
41 |
+
"""
|
42 |
+
|
43 |
+
down_block_res_samples: jnp.ndarray
|
44 |
+
mid_block_res_sample: jnp.ndarray
|
45 |
+
|
46 |
+
|
47 |
+
class FlaxControlNetConditioningEmbedding(nn.Module):
|
48 |
+
conditioning_embedding_channels: int
|
49 |
+
block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
|
50 |
+
dtype: jnp.dtype = jnp.float32
|
51 |
+
|
52 |
+
def setup(self) -> None:
|
53 |
+
self.conv_in = nn.Conv(
|
54 |
+
self.block_out_channels[0],
|
55 |
+
kernel_size=(3, 3),
|
56 |
+
padding=((1, 1), (1, 1)),
|
57 |
+
dtype=self.dtype,
|
58 |
+
)
|
59 |
+
|
60 |
+
blocks = []
|
61 |
+
for i in range(len(self.block_out_channels) - 1):
|
62 |
+
channel_in = self.block_out_channels[i]
|
63 |
+
channel_out = self.block_out_channels[i + 1]
|
64 |
+
conv1 = nn.Conv(
|
65 |
+
channel_in,
|
66 |
+
kernel_size=(3, 3),
|
67 |
+
padding=((1, 1), (1, 1)),
|
68 |
+
dtype=self.dtype,
|
69 |
+
)
|
70 |
+
blocks.append(conv1)
|
71 |
+
conv2 = nn.Conv(
|
72 |
+
channel_out,
|
73 |
+
kernel_size=(3, 3),
|
74 |
+
strides=(2, 2),
|
75 |
+
padding=((1, 1), (1, 1)),
|
76 |
+
dtype=self.dtype,
|
77 |
+
)
|
78 |
+
blocks.append(conv2)
|
79 |
+
self.blocks = blocks
|
80 |
+
|
81 |
+
self.conv_out = nn.Conv(
|
82 |
+
self.conditioning_embedding_channels,
|
83 |
+
kernel_size=(3, 3),
|
84 |
+
padding=((1, 1), (1, 1)),
|
85 |
+
kernel_init=nn.initializers.zeros_init(),
|
86 |
+
bias_init=nn.initializers.zeros_init(),
|
87 |
+
dtype=self.dtype,
|
88 |
+
)
|
89 |
+
|
90 |
+
def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray:
|
91 |
+
embedding = self.conv_in(conditioning)
|
92 |
+
embedding = nn.silu(embedding)
|
93 |
+
|
94 |
+
for block in self.blocks:
|
95 |
+
embedding = block(embedding)
|
96 |
+
embedding = nn.silu(embedding)
|
97 |
+
|
98 |
+
embedding = self.conv_out(embedding)
|
99 |
+
|
100 |
+
return embedding
|
101 |
+
|
102 |
+
|
103 |
+
@flax_register_to_config
|
104 |
+
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
|
105 |
+
r"""
|
106 |
+
A ControlNet model.
|
107 |
+
|
108 |
+
This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for its generic methods
|
109 |
+
implemented for all models (such as downloading or saving).
|
110 |
+
|
111 |
+
This model is also a Flax Linen [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
|
112 |
+
subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its
|
113 |
+
general usage and behavior.
|
114 |
+
|
115 |
+
Inherent JAX features such as the following are supported:
|
116 |
+
|
117 |
+
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
|
118 |
+
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
|
119 |
+
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
|
120 |
+
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
|
121 |
+
|
122 |
+
Parameters:
|
123 |
+
sample_size (`int`, *optional*):
|
124 |
+
The size of the input sample.
|
125 |
+
in_channels (`int`, *optional*, defaults to 4):
|
126 |
+
The number of channels in the input sample.
|
127 |
+
down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`):
|
128 |
+
The tuple of downsample blocks to use.
|
129 |
+
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
|
130 |
+
The tuple of output channels for each block.
|
131 |
+
layers_per_block (`int`, *optional*, defaults to 2):
|
132 |
+
The number of layers per block.
|
133 |
+
attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8):
|
134 |
+
The dimension of the attention heads.
|
135 |
+
num_attention_heads (`int` or `Tuple[int]`, *optional*):
|
136 |
+
The number of attention heads.
|
137 |
+
cross_attention_dim (`int`, *optional*, defaults to 768):
|
138 |
+
The dimension of the cross attention features.
|
139 |
+
dropout (`float`, *optional*, defaults to 0):
|
140 |
+
Dropout probability for down, up and bottleneck blocks.
|
141 |
+
flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
|
142 |
+
Whether to flip the sin to cos in the time embedding.
|
143 |
+
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
|
144 |
+
controlnet_conditioning_channel_order (`str`, *optional*, defaults to `rgb`):
|
145 |
+
The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
|
146 |
+
conditioning_embedding_out_channels (`tuple`, *optional*, defaults to `(16, 32, 96, 256)`):
|
147 |
+
The tuple of output channel for each block in the `conditioning_embedding` layer.
|
148 |
+
"""
|
149 |
+
|
150 |
+
sample_size: int = 32
|
151 |
+
in_channels: int = 4
|
152 |
+
down_block_types: Tuple[str, ...] = (
|
153 |
+
"CrossAttnDownBlock2D",
|
154 |
+
"CrossAttnDownBlock2D",
|
155 |
+
"CrossAttnDownBlock2D",
|
156 |
+
"DownBlock2D",
|
157 |
+
)
|
158 |
+
only_cross_attention: Union[bool, Tuple[bool, ...]] = False
|
159 |
+
block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
|
160 |
+
layers_per_block: int = 2
|
161 |
+
attention_head_dim: Union[int, Tuple[int, ...]] = 8
|
162 |
+
num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
|
163 |
+
cross_attention_dim: int = 1280
|
164 |
+
dropout: float = 0.0
|
165 |
+
use_linear_projection: bool = False
|
166 |
+
dtype: jnp.dtype = jnp.float32
|
167 |
+
flip_sin_to_cos: bool = True
|
168 |
+
freq_shift: int = 0
|
169 |
+
controlnet_conditioning_channel_order: str = "rgb"
|
170 |
+
conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
|
171 |
+
|
172 |
+
def init_weights(self, rng: jax.Array) -> FrozenDict:
|
173 |
+
# init input tensors
|
174 |
+
sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
|
175 |
+
sample = jnp.zeros(sample_shape, dtype=jnp.float32)
|
176 |
+
timesteps = jnp.ones((1,), dtype=jnp.int32)
|
177 |
+
encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
|
178 |
+
controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
|
179 |
+
controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
|
180 |
+
|
181 |
+
params_rng, dropout_rng = jax.random.split(rng)
|
182 |
+
rngs = {"params": params_rng, "dropout": dropout_rng}
|
183 |
+
|
184 |
+
return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
|
185 |
+
|
186 |
+
def setup(self) -> None:
|
187 |
+
block_out_channels = self.block_out_channels
|
188 |
+
time_embed_dim = block_out_channels[0] * 4
|
189 |
+
|
190 |
+
# If `num_attention_heads` is not defined (which is the case for most models)
|
191 |
+
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
|
192 |
+
# The reason for this behavior is to correct for incorrectly named variables that were introduced
|
193 |
+
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
|
194 |
+
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
|
195 |
+
# which is why we correct for the naming here.
|
196 |
+
num_attention_heads = self.num_attention_heads or self.attention_head_dim
|
197 |
+
|
198 |
+
# input
|
199 |
+
self.conv_in = nn.Conv(
|
200 |
+
block_out_channels[0],
|
201 |
+
kernel_size=(3, 3),
|
202 |
+
strides=(1, 1),
|
203 |
+
padding=((1, 1), (1, 1)),
|
204 |
+
dtype=self.dtype,
|
205 |
+
)
|
206 |
+
|
207 |
+
# time
|
208 |
+
self.time_proj = FlaxTimesteps(
|
209 |
+
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
|
210 |
+
)
|
211 |
+
self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
|
212 |
+
|
213 |
+
self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
|
214 |
+
conditioning_embedding_channels=block_out_channels[0],
|
215 |
+
block_out_channels=self.conditioning_embedding_out_channels,
|
216 |
+
)
|
217 |
+
|
218 |
+
only_cross_attention = self.only_cross_attention
|
219 |
+
if isinstance(only_cross_attention, bool):
|
220 |
+
only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
|
221 |
+
|
222 |
+
if isinstance(num_attention_heads, int):
|
223 |
+
num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
|
224 |
+
|
225 |
+
# down
|
226 |
+
down_blocks = []
|
227 |
+
controlnet_down_blocks = []
|
228 |
+
|
229 |
+
output_channel = block_out_channels[0]
|
230 |
+
|
231 |
+
controlnet_block = nn.Conv(
|
232 |
+
output_channel,
|
233 |
+
kernel_size=(1, 1),
|
234 |
+
padding="VALID",
|
235 |
+
kernel_init=nn.initializers.zeros_init(),
|
236 |
+
bias_init=nn.initializers.zeros_init(),
|
237 |
+
dtype=self.dtype,
|
238 |
+
)
|
239 |
+
controlnet_down_blocks.append(controlnet_block)
|
240 |
+
|
241 |
+
for i, down_block_type in enumerate(self.down_block_types):
|
242 |
+
input_channel = output_channel
|
243 |
+
output_channel = block_out_channels[i]
|
244 |
+
is_final_block = i == len(block_out_channels) - 1
|
245 |
+
|
246 |
+
if down_block_type == "CrossAttnDownBlock2D":
|
247 |
+
down_block = FlaxCrossAttnDownBlock2D(
|
248 |
+
in_channels=input_channel,
|
249 |
+
out_channels=output_channel,
|
250 |
+
dropout=self.dropout,
|
251 |
+
num_layers=self.layers_per_block,
|
252 |
+
num_attention_heads=num_attention_heads[i],
|
253 |
+
add_downsample=not is_final_block,
|
254 |
+
use_linear_projection=self.use_linear_projection,
|
255 |
+
only_cross_attention=only_cross_attention[i],
|
256 |
+
dtype=self.dtype,
|
257 |
+
)
|
258 |
+
else:
|
259 |
+
down_block = FlaxDownBlock2D(
|
260 |
+
in_channels=input_channel,
|
261 |
+
out_channels=output_channel,
|
262 |
+
dropout=self.dropout,
|
263 |
+
num_layers=self.layers_per_block,
|
264 |
+
add_downsample=not is_final_block,
|
265 |
+
dtype=self.dtype,
|
266 |
+
)
|
267 |
+
|
268 |
+
down_blocks.append(down_block)
|
269 |
+
|
270 |
+
for _ in range(self.layers_per_block):
|
271 |
+
controlnet_block = nn.Conv(
|
272 |
+
output_channel,
|
273 |
+
kernel_size=(1, 1),
|
274 |
+
padding="VALID",
|
275 |
+
kernel_init=nn.initializers.zeros_init(),
|
276 |
+
bias_init=nn.initializers.zeros_init(),
|
277 |
+
dtype=self.dtype,
|
278 |
+
)
|
279 |
+
controlnet_down_blocks.append(controlnet_block)
|
280 |
+
|
281 |
+
if not is_final_block:
|
282 |
+
controlnet_block = nn.Conv(
|
283 |
+
output_channel,
|
284 |
+
kernel_size=(1, 1),
|
285 |
+
padding="VALID",
|
286 |
+
kernel_init=nn.initializers.zeros_init(),
|
287 |
+
bias_init=nn.initializers.zeros_init(),
|
288 |
+
dtype=self.dtype,
|
289 |
+
)
|
290 |
+
controlnet_down_blocks.append(controlnet_block)
|
291 |
+
|
292 |
+
self.down_blocks = down_blocks
|
293 |
+
self.controlnet_down_blocks = controlnet_down_blocks
|
294 |
+
|
295 |
+
# mid
|
296 |
+
mid_block_channel = block_out_channels[-1]
|
297 |
+
self.mid_block = FlaxUNetMidBlock2DCrossAttn(
|
298 |
+
in_channels=mid_block_channel,
|
299 |
+
dropout=self.dropout,
|
300 |
+
num_attention_heads=num_attention_heads[-1],
|
301 |
+
use_linear_projection=self.use_linear_projection,
|
302 |
+
dtype=self.dtype,
|
303 |
+
)
|
304 |
+
|
305 |
+
self.controlnet_mid_block = nn.Conv(
|
306 |
+
mid_block_channel,
|
307 |
+
kernel_size=(1, 1),
|
308 |
+
padding="VALID",
|
309 |
+
kernel_init=nn.initializers.zeros_init(),
|
310 |
+
bias_init=nn.initializers.zeros_init(),
|
311 |
+
dtype=self.dtype,
|
312 |
+
)
|
313 |
+
|
314 |
+
def __call__(
|
315 |
+
self,
|
316 |
+
sample: jnp.ndarray,
|
317 |
+
timesteps: Union[jnp.ndarray, float, int],
|
318 |
+
encoder_hidden_states: jnp.ndarray,
|
319 |
+
controlnet_cond: jnp.ndarray,
|
320 |
+
conditioning_scale: float = 1.0,
|
321 |
+
return_dict: bool = True,
|
322 |
+
train: bool = False,
|
323 |
+
) -> Union[FlaxControlNetOutput, Tuple[Tuple[jnp.ndarray, ...], jnp.ndarray]]:
|
324 |
+
r"""
|
325 |
+
Args:
|
326 |
+
sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor
|
327 |
+
timesteps (`jnp.ndarray` or `float` or `int`): timesteps
|
328 |
+
encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states
|
329 |
+
controlnet_cond (`jnp.ndarray`): (batch, channel, height, width) the conditional input tensor
|
330 |
+
conditioning_scale (`float`, *optional*, defaults to `1.0`): the scale factor for controlnet outputs
|
331 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
332 |
+
Whether or not to return a [`~models.controlnet_flax.FlaxControlNetOutput`] instead of a
|
333 |
+
plain tuple.
|
334 |
+
train (`bool`, *optional*, defaults to `False`):
|
335 |
+
Use deterministic functions and disable dropout when not training.
|
336 |
+
|
337 |
+
Returns:
|
338 |
+
[`~models.controlnet_flax.FlaxControlNetOutput`] or `tuple`:
|
339 |
+
[`~models.controlnet_flax.FlaxControlNetOutput`] if `return_dict` is True, otherwise a
|
340 |
+
`tuple`. When returning a tuple, the first element is the sample tensor.
|
341 |
+
"""
|
342 |
+
channel_order = self.controlnet_conditioning_channel_order
|
343 |
+
if channel_order == "bgr":
|
344 |
+
controlnet_cond = jnp.flip(controlnet_cond, axis=1)
|
345 |
+
|
346 |
+
# 1. time
|
347 |
+
if not isinstance(timesteps, jnp.ndarray):
|
348 |
+
timesteps = jnp.array([timesteps], dtype=jnp.int32)
|
349 |
+
elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
|
350 |
+
timesteps = timesteps.astype(dtype=jnp.float32)
|
351 |
+
timesteps = jnp.expand_dims(timesteps, 0)
|
352 |
+
|
353 |
+
t_emb = self.time_proj(timesteps)
|
354 |
+
t_emb = self.time_embedding(t_emb)
|
355 |
+
|
356 |
+
# 2. pre-process
|
357 |
+
sample = jnp.transpose(sample, (0, 2, 3, 1))
|
358 |
+
sample = self.conv_in(sample)
|
359 |
+
|
360 |
+
controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
|
361 |
+
controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
|
362 |
+
sample += controlnet_cond
|
363 |
+
|
364 |
+
# 3. down
|
365 |
+
down_block_res_samples = (sample,)
|
366 |
+
for down_block in self.down_blocks:
|
367 |
+
if isinstance(down_block, FlaxCrossAttnDownBlock2D):
|
368 |
+
sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
|
369 |
+
else:
|
370 |
+
sample, res_samples = down_block(sample, t_emb, deterministic=not train)
|
371 |
+
down_block_res_samples += res_samples
|
372 |
+
|
373 |
+
# 4. mid
|
374 |
+
sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
|
375 |
+
|
376 |
+
# 5. controlnet blocks
|
377 |
+
controlnet_down_block_res_samples = ()
|
378 |
+
for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
|
379 |
+
down_block_res_sample = controlnet_block(down_block_res_sample)
|
380 |
+
controlnet_down_block_res_samples += (down_block_res_sample,)
|
381 |
+
|
382 |
+
down_block_res_samples = controlnet_down_block_res_samples
|
383 |
+
|
384 |
+
mid_block_res_sample = self.controlnet_mid_block(sample)
|
385 |
+
|
386 |
+
# 6. scaling
|
387 |
+
down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
|
388 |
+
mid_block_res_sample *= conditioning_scale
|
389 |
+
|
390 |
+
if not return_dict:
|
391 |
+
return (down_block_res_samples, mid_block_res_sample)
|
392 |
+
|
393 |
+
return FlaxControlNetOutput(
|
394 |
+
down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
|
395 |
+
)
|
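
A similarly hedged sketch for the Flax variant added above: parameters are created with `init_weights` and the module is invoked through `apply`. Shapes follow the defaults declared in this file (`sample_size=32`, `in_channels=4`, `cross_attention_dim=1280`); everything else is illustrative and not part of this commit.

# Minimal sketch of FlaxControlNetModel usage (illustrative shapes, randomly initialized params).
import jax
import jax.numpy as jnp
from diffusers import FlaxControlNetModel

model = FlaxControlNetModel()                       # default config from this file
params = model.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 32, 32))                  # noisy latents (NCHW)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 1280))
controlnet_cond = jnp.zeros((1, 3, 256, 256))       # conditioning image, 8x the latent size

out = model.apply(
    {"params": params},
    sample,
    timesteps,
    encoder_hidden_states,
    controlnet_cond,
)
print(out.mid_block_res_sample.shape)
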
diffusers/models/downsampling.py
ADDED
@@ -0,0 +1,338 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from ..utils import USE_PEFT_BACKEND
from .lora import LoRACompatibleConv
from .normalization import RMSNorm
from .upsampling import upfirdn2d_native


class Downsample1D(nn.Module):
    """A 1D downsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        padding (`int`, default `1`):
            padding for the convolution.
        name (`str`, default `conv`):
            name of the downsampling 1D layer.
    """

    def __init__(
        self,
        channels: int,
        use_conv: bool = False,
        out_channels: Optional[int] = None,
        padding: int = 1,
        name: str = "conv",
    ):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.padding = padding
        stride = 2
        self.name = name

        if use_conv:
            self.conv = nn.Conv1d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
        else:
            assert self.channels == self.out_channels
            self.conv = nn.AvgPool1d(kernel_size=stride, stride=stride)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        assert inputs.shape[1] == self.channels
        return self.conv(inputs)


class Downsample2D(nn.Module):
    """A 2D downsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        padding (`int`, default `1`):
            padding for the convolution.
        name (`str`, default `conv`):
            name of the downsampling 2D layer.
    """

    def __init__(
        self,
        channels: int,
        use_conv: bool = False,
        out_channels: Optional[int] = None,
        padding: int = 1,
        name: str = "conv",
        kernel_size=3,
        norm_type=None,
        eps=None,
        elementwise_affine=None,
        bias=True,
    ):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.padding = padding
        stride = 2
        self.name = name
        conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv

        if norm_type == "ln_norm":
            self.norm = nn.LayerNorm(channels, eps, elementwise_affine)
        elif norm_type == "rms_norm":
            self.norm = RMSNorm(channels, eps, elementwise_affine)
        elif norm_type is None:
            self.norm = None
        else:
            raise ValueError(f"unknown norm_type: {norm_type}")

        if use_conv:
            conv = conv_cls(
                self.channels, self.out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias
            )
        else:
            assert self.channels == self.out_channels
            conv = nn.AvgPool2d(kernel_size=stride, stride=stride)

        # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
        if name == "conv":
            self.Conv2d_0 = conv
            self.conv = conv
        elif name == "Conv2d_0":
            self.conv = conv
        else:
            self.conv = conv

    def forward(self, hidden_states: torch.FloatTensor, scale: float = 1.0) -> torch.FloatTensor:
        assert hidden_states.shape[1] == self.channels

        if self.norm is not None:
            hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)

        if self.use_conv and self.padding == 0:
            pad = (0, 1, 0, 1)
            hidden_states = F.pad(hidden_states, pad, mode="constant", value=0)

        assert hidden_states.shape[1] == self.channels

        if not USE_PEFT_BACKEND:
            if isinstance(self.conv, LoRACompatibleConv):
                hidden_states = self.conv(hidden_states, scale)
            else:
                hidden_states = self.conv(hidden_states)
        else:
            hidden_states = self.conv(hidden_states)

        return hidden_states


class FirDownsample2D(nn.Module):
    """A 2D FIR downsampling layer with an optional convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        use_conv (`bool`, default `False`):
            option to use a convolution.
        out_channels (`int`, optional):
            number of output channels. Defaults to `channels`.
        fir_kernel (`tuple`, default `(1, 3, 3, 1)`):
            kernel for the FIR filter.
    """

    def __init__(
        self,
        channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        use_conv: bool = False,
        fir_kernel: Tuple[int, int, int, int] = (1, 3, 3, 1),
    ):
        super().__init__()
        out_channels = out_channels if out_channels else channels
        if use_conv:
            self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.fir_kernel = fir_kernel
        self.use_conv = use_conv
        self.out_channels = out_channels

    def _downsample_2d(
        self,
        hidden_states: torch.FloatTensor,
        weight: Optional[torch.FloatTensor] = None,
        kernel: Optional[torch.FloatTensor] = None,
        factor: int = 2,
        gain: float = 1,
    ) -> torch.FloatTensor:
        """Fused `Conv2d()` followed by `downsample_2d()`.
        Padding is performed only once at the beginning, not between the operations. The fused op is considerably more
        efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of
        arbitrary order.

        Args:
            hidden_states (`torch.FloatTensor`):
                Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
            weight (`torch.FloatTensor`, *optional*):
                Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be
                performed by `inChannels = x.shape[0] // numGroups`.
            kernel (`torch.FloatTensor`, *optional*):
                FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which
                corresponds to average pooling.
            factor (`int`, *optional*, default to `2`):
                Integer downsampling factor.
            gain (`float`, *optional*, default to `1.0`):
                Scaling factor for signal magnitude.

        Returns:
            output (`torch.FloatTensor`):
                Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same
                datatype as `x`.
        """

        assert isinstance(factor, int) and factor >= 1
        if kernel is None:
            kernel = [1] * factor

        # setup kernel
        kernel = torch.tensor(kernel, dtype=torch.float32)
        if kernel.ndim == 1:
            kernel = torch.outer(kernel, kernel)
        kernel /= torch.sum(kernel)

        kernel = kernel * gain

        if self.use_conv:
            _, _, convH, convW = weight.shape
            pad_value = (kernel.shape[0] - factor) + (convW - 1)
            stride_value = [factor, factor]
            upfirdn_input = upfirdn2d_native(
                hidden_states,
                torch.tensor(kernel, device=hidden_states.device),
                pad=((pad_value + 1) // 2, pad_value // 2),
            )
            output = F.conv2d(upfirdn_input, weight, stride=stride_value, padding=0)
        else:
            pad_value = kernel.shape[0] - factor
            output = upfirdn2d_native(
                hidden_states,
                torch.tensor(kernel, device=hidden_states.device),
                down=factor,
                pad=((pad_value + 1) // 2, pad_value // 2),
            )

        return output

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        if self.use_conv:
            downsample_input = self._downsample_2d(hidden_states, weight=self.Conv2d_0.weight, kernel=self.fir_kernel)
            hidden_states = downsample_input + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
        else:
            hidden_states = self._downsample_2d(hidden_states, kernel=self.fir_kernel, factor=2)

        return hidden_states


# downsample/upsample layer used in k-upscaler, might be able to use FirDownsample2D/DirUpsample2D instead
class KDownsample2D(nn.Module):
    r"""A 2D K-downsampling layer.

    Parameters:
        pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use.
    """

    def __init__(self, pad_mode: str = "reflect"):
        super().__init__()
        self.pad_mode = pad_mode
        kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]])
        self.pad = kernel_1d.shape[1] // 2 - 1
        self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        inputs = F.pad(inputs, (self.pad,) * 4, self.pad_mode)
        weight = inputs.new_zeros(
            [
                inputs.shape[1],
                inputs.shape[1],
                self.kernel.shape[0],
                self.kernel.shape[1],
            ]
        )
        indices = torch.arange(inputs.shape[1], device=inputs.device)
        kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1)
        weight[indices, indices] = kernel
        return F.conv2d(inputs, weight, stride=2)


def downsample_2d(
    hidden_states: torch.FloatTensor,
    kernel: Optional[torch.FloatTensor] = None,
    factor: int = 2,
    gain: float = 1,
) -> torch.FloatTensor:
    r"""Downsample2D a batch of 2D images with the given filter.
    Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the
    given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the
    specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its
    shape is a multiple of the downsampling factor.

    Args:
        hidden_states (`torch.FloatTensor`)
            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
        kernel (`torch.FloatTensor`, *optional*):
            FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which
            corresponds to average pooling.
        factor (`int`, *optional*, default to `2`):
            Integer downsampling factor.
        gain (`float`, *optional*, default to `1.0`):
            Scaling factor for signal magnitude.

    Returns:
        output (`torch.FloatTensor`):
            Tensor of the shape `[N, C, H // factor, W // factor]`
    """

    assert isinstance(factor, int) and factor >= 1
    if kernel is None:
        kernel = [1] * factor

    kernel = torch.tensor(kernel, dtype=torch.float32)
    if kernel.ndim == 1:
        kernel = torch.outer(kernel, kernel)
    kernel /= torch.sum(kernel)

    kernel = kernel * gain
    pad_value = kernel.shape[0] - factor
    output = upfirdn2d_native(
        hidden_states,
        kernel.to(device=hidden_states.device),
        down=factor,
        pad=((pad_value + 1) // 2, pad_value // 2),
    )
    return output
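As a quick sanity check of the layers defined above, the following is a small sketch, assuming this vendored diffusers package is importable as `diffusers`: both the convolutional path and the default FIR path halve the spatial resolution.

import torch
from diffusers.models.downsampling import Downsample2D, downsample_2d

x = torch.randn(1, 64, 32, 32)                      # (N, C, H, W)

conv_down = Downsample2D(channels=64, use_conv=True)
print(conv_down(x).shape)                           # torch.Size([1, 64, 16, 16])

# The functional FIR variant defaults to average pooling when no kernel is given.
print(downsample_2d(x, factor=2).shape)             # torch.Size([1, 64, 16, 16])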
diffusers/models/dual_transformer_2d.py
ADDED
@@ -0,0 +1,155 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """
    Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference.

    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            Pass if the input is continuous. The number of channels in the input and output.
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.1): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
        sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
            Note that this is fixed at training time as it is used for learning a number of position embeddings. See
            `ImagePositionalEmbeddings`.
        num_vector_embeds (`int`, *optional*):
            Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
            Includes the class for the masked latent pixel.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
            The number of diffusion steps used during training. Note that this is fixed at training time as it is used
            to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
            up to but not more than steps than `num_embeds_ada_norm`.
        attention_bias (`bool`, *optional*):
            Configure if the TransformerBlocks' attention should contain a bias parameter.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """
        Args:
            hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.
                When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input
                hidden_states.
            encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
                Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
                self-attention.
            timestep ( `torch.long`, *optional*):
                Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
            attention_mask (`torch.FloatTensor`, *optional*):
                Optional attention mask to be applied in Attention.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.

        Returns:
            [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:
            [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is the sample tensor.
        """
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
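The mixing rule in `forward` above reduces to a simple weighted blend of the two transformers' residuals. The following is a minimal sketch of just that arithmetic, using toy tensors in place of real transformer outputs; the helper name and values are illustrative only.

import torch

def mix_dual_outputs(input_states, encoded_0, encoded_1, mix_ratio=0.5):
    # Keep only each transformer's residual (output - input), blend, then add the input back.
    residual_0 = encoded_0 - input_states
    residual_1 = encoded_1 - input_states
    blended = residual_0 * mix_ratio + residual_1 * (1 - mix_ratio)
    return blended + input_states

x = torch.randn(2, 4, 8, 8)
out = mix_dual_outputs(x, x + 1.0, x - 1.0, mix_ratio=0.5)
print(torch.allclose(out, x))  # True: opposite residuals cancel at mix_ratio=0.5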
diffusers/models/embeddings.py
ADDED
@@ -0,0 +1,880 @@
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
import math
|
15 |
+
from typing import Optional
|
16 |
+
|
17 |
+
import numpy as np
|
18 |
+
import torch
|
19 |
+
from torch import nn
|
20 |
+
|
21 |
+
from ..utils import USE_PEFT_BACKEND
|
22 |
+
from .activations import get_activation
|
23 |
+
from .attention_processor import Attention
|
24 |
+
from .lora import LoRACompatibleLinear
|
25 |
+
|
26 |
+
|
27 |
+
def get_timestep_embedding(
|
28 |
+
timesteps: torch.Tensor,
|
29 |
+
embedding_dim: int,
|
30 |
+
flip_sin_to_cos: bool = False,
|
31 |
+
downscale_freq_shift: float = 1,
|
32 |
+
scale: float = 1,
|
33 |
+
max_period: int = 10000,
|
34 |
+
):
|
35 |
+
"""
|
36 |
+
This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
|
37 |
+
|
38 |
+
:param timesteps: a 1-D Tensor of N indices, one per batch element.
|
39 |
+
These may be fractional.
|
40 |
+
:param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the
|
41 |
+
embeddings. :return: an [N x dim] Tensor of positional embeddings.
|
42 |
+
"""
|
43 |
+
assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"
|
44 |
+
|
45 |
+
half_dim = embedding_dim // 2
|
46 |
+
exponent = -math.log(max_period) * torch.arange(
|
47 |
+
start=0, end=half_dim, dtype=torch.float32, device=timesteps.device
|
48 |
+
)
|
49 |
+
exponent = exponent / (half_dim - downscale_freq_shift)
|
50 |
+
|
51 |
+
emb = torch.exp(exponent)
|
52 |
+
emb = timesteps[:, None].float() * emb[None, :]
|
53 |
+
|
54 |
+
# scale embeddings
|
55 |
+
emb = scale * emb
|
56 |
+
|
57 |
+
# concat sine and cosine embeddings
|
58 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
|
59 |
+
|
60 |
+
# flip sine and cosine embeddings
|
61 |
+
if flip_sin_to_cos:
|
62 |
+
emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)
|
63 |
+
|
64 |
+
# zero pad
|
65 |
+
if embedding_dim % 2 == 1:
|
66 |
+
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
|
67 |
+
return emb
|
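A small usage sketch of the function above, assuming this vendored diffusers package is importable as `diffusers`: a batch of integer timesteps becomes a dense sinusoidal feature, half sines followed by half cosines.

import torch
from diffusers.models.embeddings import get_timestep_embedding

timesteps = torch.tensor([0, 10, 999])
emb = get_timestep_embedding(timesteps, embedding_dim=320)
print(emb.shape)  # torch.Size([3, 320])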
68 |
+
|
69 |
+
|
70 |
+
def get_2d_sincos_pos_embed(
|
71 |
+
embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=16
|
72 |
+
):
|
73 |
+
"""
|
74 |
+
grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or
|
75 |
+
[1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
|
76 |
+
"""
|
77 |
+
if isinstance(grid_size, int):
|
78 |
+
grid_size = (grid_size, grid_size)
|
79 |
+
|
80 |
+
grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale
|
81 |
+
grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale
|
82 |
+
grid = np.meshgrid(grid_w, grid_h) # here w goes first
|
83 |
+
grid = np.stack(grid, axis=0)
|
84 |
+
|
85 |
+
grid = grid.reshape([2, 1, grid_size[1], grid_size[0]])
|
86 |
+
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
|
87 |
+
if cls_token and extra_tokens > 0:
|
88 |
+
pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
|
89 |
+
return pos_embed
|
90 |
+
|
91 |
+
|
92 |
+
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
|
93 |
+
if embed_dim % 2 != 0:
|
94 |
+
raise ValueError("embed_dim must be divisible by 2")
|
95 |
+
|
96 |
+
# use half of dimensions to encode grid_h
|
97 |
+
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
|
98 |
+
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
|
99 |
+
|
100 |
+
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
|
101 |
+
return emb
|
102 |
+
|
103 |
+
|
104 |
+
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
|
105 |
+
"""
|
106 |
+
embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D)
|
107 |
+
"""
|
108 |
+
if embed_dim % 2 != 0:
|
109 |
+
raise ValueError("embed_dim must be divisible by 2")
|
110 |
+
|
111 |
+
omega = np.arange(embed_dim // 2, dtype=np.float64)
|
112 |
+
omega /= embed_dim / 2.0
|
113 |
+
omega = 1.0 / 10000**omega # (D/2,)
|
114 |
+
|
115 |
+
pos = pos.reshape(-1) # (M,)
|
116 |
+
out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
|
117 |
+
|
118 |
+
emb_sin = np.sin(out) # (M, D/2)
|
119 |
+
emb_cos = np.cos(out) # (M, D/2)
|
120 |
+
|
121 |
+
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
|
122 |
+
return emb
|
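For example, a hedged sketch assuming the vendored package is importable: positional embeddings for an 8x8 patch grid with a 256-dimensional feature size give one row per grid position.

from diffusers.models.embeddings import get_2d_sincos_pos_embed

pos_embed = get_2d_sincos_pos_embed(embed_dim=256, grid_size=8)
print(pos_embed.shape)  # (64, 256), a NumPy array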
123 |
+
|
124 |
+
|
125 |
+
class PatchEmbed(nn.Module):
|
126 |
+
"""2D Image to Patch Embedding"""
|
127 |
+
|
128 |
+
def __init__(
|
129 |
+
self,
|
130 |
+
height=224,
|
131 |
+
width=224,
|
132 |
+
patch_size=16,
|
133 |
+
in_channels=3,
|
134 |
+
embed_dim=768,
|
135 |
+
layer_norm=False,
|
136 |
+
flatten=True,
|
137 |
+
bias=True,
|
138 |
+
interpolation_scale=1,
|
139 |
+
):
|
140 |
+
super().__init__()
|
141 |
+
|
142 |
+
num_patches = (height // patch_size) * (width // patch_size)
|
143 |
+
self.flatten = flatten
|
144 |
+
self.layer_norm = layer_norm
|
145 |
+
|
146 |
+
self.proj = nn.Conv2d(
|
147 |
+
in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias
|
148 |
+
)
|
149 |
+
if layer_norm:
|
150 |
+
self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6)
|
151 |
+
else:
|
152 |
+
self.norm = None
|
153 |
+
|
154 |
+
self.patch_size = patch_size
|
155 |
+
# See:
|
156 |
+
# https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L161
|
157 |
+
self.height, self.width = height // patch_size, width // patch_size
|
158 |
+
self.base_size = height // patch_size
|
159 |
+
self.interpolation_scale = interpolation_scale
|
160 |
+
pos_embed = get_2d_sincos_pos_embed(
|
161 |
+
embed_dim, int(num_patches**0.5), base_size=self.base_size, interpolation_scale=self.interpolation_scale
|
162 |
+
)
|
163 |
+
self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False)
|
164 |
+
|
165 |
+
def forward(self, latent):
|
166 |
+
height, width = latent.shape[-2] // self.patch_size, latent.shape[-1] // self.patch_size
|
167 |
+
|
168 |
+
latent = self.proj(latent)
|
169 |
+
if self.flatten:
|
170 |
+
latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC
|
171 |
+
if self.layer_norm:
|
172 |
+
latent = self.norm(latent)
|
173 |
+
|
174 |
+
# Interpolate positional embeddings if needed.
|
175 |
+
# (For PixArt-Alpha: https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L162C151-L162C160)
|
176 |
+
if self.height != height or self.width != width:
|
177 |
+
pos_embed = get_2d_sincos_pos_embed(
|
178 |
+
embed_dim=self.pos_embed.shape[-1],
|
179 |
+
grid_size=(height, width),
|
180 |
+
base_size=self.base_size,
|
181 |
+
interpolation_scale=self.interpolation_scale,
|
182 |
+
)
|
183 |
+
pos_embed = torch.from_numpy(pos_embed)
|
184 |
+
pos_embed = pos_embed.float().unsqueeze(0).to(latent.device)
|
185 |
+
else:
|
186 |
+
pos_embed = self.pos_embed
|
187 |
+
|
188 |
+
return (latent + pos_embed).to(latent.dtype)
|
189 |
+
|
190 |
+
|
191 |
+
class TimestepEmbedding(nn.Module):
|
192 |
+
def __init__(
|
193 |
+
self,
|
194 |
+
in_channels: int,
|
195 |
+
time_embed_dim: int,
|
196 |
+
act_fn: str = "silu",
|
197 |
+
out_dim: int = None,
|
198 |
+
post_act_fn: Optional[str] = None,
|
199 |
+
cond_proj_dim=None,
|
200 |
+
sample_proj_bias=True,
|
201 |
+
):
|
202 |
+
super().__init__()
|
203 |
+
linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear
|
204 |
+
|
205 |
+
self.linear_1 = linear_cls(in_channels, time_embed_dim, sample_proj_bias)
|
206 |
+
|
207 |
+
if cond_proj_dim is not None:
|
208 |
+
self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False)
|
209 |
+
else:
|
210 |
+
self.cond_proj = None
|
211 |
+
|
212 |
+
self.act = get_activation(act_fn)
|
213 |
+
|
214 |
+
if out_dim is not None:
|
215 |
+
time_embed_dim_out = out_dim
|
216 |
+
else:
|
217 |
+
time_embed_dim_out = time_embed_dim
|
218 |
+
self.linear_2 = linear_cls(time_embed_dim, time_embed_dim_out, sample_proj_bias)
|
219 |
+
|
220 |
+
if post_act_fn is None:
|
221 |
+
self.post_act = None
|
222 |
+
else:
|
223 |
+
self.post_act = get_activation(post_act_fn)
|
224 |
+
|
225 |
+
def forward(self, sample, condition=None):
|
226 |
+
if condition is not None:
|
227 |
+
sample = sample + self.cond_proj(condition)
|
228 |
+
sample = self.linear_1(sample)
|
229 |
+
|
230 |
+
if self.act is not None:
|
231 |
+
sample = self.act(sample)
|
232 |
+
|
233 |
+
sample = self.linear_2(sample)
|
234 |
+
|
235 |
+
if self.post_act is not None:
|
236 |
+
sample = self.post_act(sample)
|
237 |
+
return sample
|
238 |
+
|
239 |
+
|
240 |
+
class Timesteps(nn.Module):
|
241 |
+
def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float):
|
242 |
+
super().__init__()
|
243 |
+
self.num_channels = num_channels
|
244 |
+
self.flip_sin_to_cos = flip_sin_to_cos
|
245 |
+
self.downscale_freq_shift = downscale_freq_shift
|
246 |
+
|
247 |
+
def forward(self, timesteps):
|
248 |
+
t_emb = get_timestep_embedding(
|
249 |
+
timesteps,
|
250 |
+
self.num_channels,
|
251 |
+
flip_sin_to_cos=self.flip_sin_to_cos,
|
252 |
+
downscale_freq_shift=self.downscale_freq_shift,
|
253 |
+
)
|
254 |
+
return t_emb
|
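These two modules are typically chained, exactly as in the ControlNet forward pass earlier in this diff: `Timesteps` produces the fixed sinusoidal features and `TimestepEmbedding` maps them through a small MLP to the model's time-embedding width. A hedged usage sketch with illustrative sizes (320 and 1280 are assumptions, not required values):

import torch
from diffusers.models.embeddings import Timesteps, TimestepEmbedding

time_proj = Timesteps(num_channels=320, flip_sin_to_cos=True, downscale_freq_shift=0)
time_embedding = TimestepEmbedding(in_channels=320, time_embed_dim=1280)

t = torch.tensor([1, 250, 999])
t_emb = time_proj(t)            # (3, 320) fixed sinusoidal features
t_emb = time_embedding(t_emb)   # (3, 1280) learned embedding
print(t_emb.shape)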
255 |
+
|
256 |
+
|
257 |
+
class GaussianFourierProjection(nn.Module):
|
258 |
+
"""Gaussian Fourier embeddings for noise levels."""
|
259 |
+
|
260 |
+
def __init__(
|
261 |
+
self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False
|
262 |
+
):
|
263 |
+
super().__init__()
|
264 |
+
self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
|
265 |
+
self.log = log
|
266 |
+
self.flip_sin_to_cos = flip_sin_to_cos
|
267 |
+
|
268 |
+
if set_W_to_weight:
|
269 |
+
# to delete later
|
270 |
+
self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
|
271 |
+
|
272 |
+
self.weight = self.W
|
273 |
+
|
274 |
+
def forward(self, x):
|
275 |
+
if self.log:
|
276 |
+
x = torch.log(x)
|
277 |
+
|
278 |
+
x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi
|
279 |
+
|
280 |
+
if self.flip_sin_to_cos:
|
281 |
+
out = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1)
|
282 |
+
else:
|
283 |
+
out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
|
284 |
+
return out
|
285 |
+
|
286 |
+
|
287 |
+
class SinusoidalPositionalEmbedding(nn.Module):
|
288 |
+
"""Apply positional information to a sequence of embeddings.
|
289 |
+
|
290 |
+
Takes in a sequence of embeddings with shape (batch_size, seq_length, embed_dim) and adds positional embeddings to
|
291 |
+
them
|
292 |
+
|
293 |
+
Args:
|
294 |
+
embed_dim: (int): Dimension of the positional embedding.
|
295 |
+
max_seq_length: Maximum sequence length to apply positional embeddings
|
296 |
+
|
297 |
+
"""
|
298 |
+
|
299 |
+
def __init__(self, embed_dim: int, max_seq_length: int = 32):
|
300 |
+
super().__init__()
|
301 |
+
position = torch.arange(max_seq_length).unsqueeze(1)
|
302 |
+
div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim))
|
303 |
+
pe = torch.zeros(1, max_seq_length, embed_dim)
|
304 |
+
pe[0, :, 0::2] = torch.sin(position * div_term)
|
305 |
+
pe[0, :, 1::2] = torch.cos(position * div_term)
|
306 |
+
self.register_buffer("pe", pe)
|
307 |
+
|
308 |
+
def forward(self, x):
|
309 |
+
_, seq_length, _ = x.shape
|
310 |
+
x = x + self.pe[:, :seq_length]
|
311 |
+
return x
|
312 |
+
|
313 |
+
|
314 |
+
class ImagePositionalEmbeddings(nn.Module):
|
315 |
+
"""
|
316 |
+
Converts latent image classes into vector embeddings. Sums the vector embeddings with positional embeddings for the
|
317 |
+
height and width of the latent space.
|
318 |
+
|
319 |
+
For more details, see figure 10 of the dall-e paper: https://arxiv.org/abs/2102.12092
|
320 |
+
|
321 |
+
For VQ-diffusion:
|
322 |
+
|
323 |
+
Output vector embeddings are used as input for the transformer.
|
324 |
+
|
325 |
+
Note that the vector embeddings for the transformer are different than the vector embeddings from the VQVAE.
|
326 |
+
|
327 |
+
Args:
|
328 |
+
num_embed (`int`):
|
329 |
+
Number of embeddings for the latent pixels embeddings.
|
330 |
+
height (`int`):
|
331 |
+
Height of the latent image i.e. the number of height embeddings.
|
332 |
+
width (`int`):
|
333 |
+
Width of the latent image i.e. the number of width embeddings.
|
334 |
+
embed_dim (`int`):
|
335 |
+
Dimension of the produced vector embeddings. Used for the latent pixel, height, and width embeddings.
|
336 |
+
"""
|
337 |
+
|
338 |
+
def __init__(
|
339 |
+
self,
|
340 |
+
num_embed: int,
|
341 |
+
height: int,
|
342 |
+
width: int,
|
343 |
+
embed_dim: int,
|
344 |
+
):
|
345 |
+
super().__init__()
|
346 |
+
|
347 |
+
self.height = height
|
348 |
+
self.width = width
|
349 |
+
self.num_embed = num_embed
|
350 |
+
self.embed_dim = embed_dim
|
351 |
+
|
352 |
+
self.emb = nn.Embedding(self.num_embed, embed_dim)
|
353 |
+
self.height_emb = nn.Embedding(self.height, embed_dim)
|
354 |
+
self.width_emb = nn.Embedding(self.width, embed_dim)
|
355 |
+
|
356 |
+
def forward(self, index):
|
357 |
+
emb = self.emb(index)
|
358 |
+
|
359 |
+
height_emb = self.height_emb(torch.arange(self.height, device=index.device).view(1, self.height))
|
360 |
+
|
361 |
+
# 1 x H x D -> 1 x H x 1 x D
|
362 |
+
height_emb = height_emb.unsqueeze(2)
|
363 |
+
|
364 |
+
width_emb = self.width_emb(torch.arange(self.width, device=index.device).view(1, self.width))
|
365 |
+
|
366 |
+
# 1 x W x D -> 1 x 1 x W x D
|
367 |
+
width_emb = width_emb.unsqueeze(1)
|
368 |
+
|
369 |
+
pos_emb = height_emb + width_emb
|
370 |
+
|
371 |
+
# 1 x H x W x D -> 1 x L xD
|
372 |
+
pos_emb = pos_emb.view(1, self.height * self.width, -1)
|
373 |
+
|
374 |
+
emb = emb + pos_emb[:, : emb.shape[1], :]
|
375 |
+
|
376 |
+
return emb
|
377 |
+
|
378 |
+
|
379 |
+
class LabelEmbedding(nn.Module):
|
380 |
+
"""
|
381 |
+
Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
|
382 |
+
|
383 |
+
Args:
|
384 |
+
num_classes (`int`): The number of classes.
|
385 |
+
hidden_size (`int`): The size of the vector embeddings.
|
386 |
+
dropout_prob (`float`): The probability of dropping a label.
|
387 |
+
"""
|
388 |
+
|
389 |
+
def __init__(self, num_classes, hidden_size, dropout_prob):
|
390 |
+
super().__init__()
|
391 |
+
use_cfg_embedding = dropout_prob > 0
|
392 |
+
self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
|
393 |
+
self.num_classes = num_classes
|
394 |
+
self.dropout_prob = dropout_prob
|
395 |
+
|
396 |
+
def token_drop(self, labels, force_drop_ids=None):
|
397 |
+
"""
|
398 |
+
Drops labels to enable classifier-free guidance.
|
399 |
+
"""
|
400 |
+
if force_drop_ids is None:
|
401 |
+
drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
|
402 |
+
else:
|
403 |
+
drop_ids = torch.tensor(force_drop_ids == 1)
|
404 |
+
labels = torch.where(drop_ids, self.num_classes, labels)
|
405 |
+
return labels
|
406 |
+
|
407 |
+
def forward(self, labels: torch.LongTensor, force_drop_ids=None):
|
408 |
+
use_dropout = self.dropout_prob > 0
|
409 |
+
if (self.training and use_dropout) or (force_drop_ids is not None):
|
410 |
+
labels = self.token_drop(labels, force_drop_ids)
|
411 |
+
embeddings = self.embedding_table(labels)
|
412 |
+
return embeddings
|
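A hedged sketch of the label-dropout behaviour above, with illustrative sizes: during training a fraction of labels is replaced by the extra "null" class (index `num_classes`) so the model also learns an unconditional embedding, and `force_drop_ids` reproduces that unconditional branch for every sample.

import torch
from diffusers.models.embeddings import LabelEmbedding

label_emb = LabelEmbedding(num_classes=10, hidden_size=64, dropout_prob=0.1)
labels = torch.tensor([3, 7, 9])

label_emb.train()
print(label_emb(labels).shape)  # torch.Size([3, 64])

# Force every label to the null class used for classifier-free guidance.
dropped = label_emb(labels, force_drop_ids=torch.ones(3, dtype=torch.long))
print(dropped.shape)            # torch.Size([3, 64])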
413 |
+
|
414 |
+
|
415 |
+
class TextImageProjection(nn.Module):
|
416 |
+
def __init__(
|
417 |
+
self,
|
418 |
+
text_embed_dim: int = 1024,
|
419 |
+
image_embed_dim: int = 768,
|
420 |
+
cross_attention_dim: int = 768,
|
421 |
+
num_image_text_embeds: int = 10,
|
422 |
+
):
|
423 |
+
super().__init__()
|
424 |
+
|
425 |
+
self.num_image_text_embeds = num_image_text_embeds
|
426 |
+
self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim)
|
427 |
+
self.text_proj = nn.Linear(text_embed_dim, cross_attention_dim)
|
428 |
+
|
429 |
+
def forward(self, text_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor):
|
430 |
+
batch_size = text_embeds.shape[0]
|
431 |
+
|
432 |
+
# image
|
433 |
+
image_text_embeds = self.image_embeds(image_embeds)
|
434 |
+
image_text_embeds = image_text_embeds.reshape(batch_size, self.num_image_text_embeds, -1)
|
435 |
+
|
436 |
+
# text
|
437 |
+
text_embeds = self.text_proj(text_embeds)
|
438 |
+
|
439 |
+
return torch.cat([image_text_embeds, text_embeds], dim=1)
|
440 |
+
|
441 |
+
|
442 |
+
class ImageProjection(nn.Module):
|
443 |
+
def __init__(
|
444 |
+
self,
|
445 |
+
image_embed_dim: int = 768,
|
446 |
+
cross_attention_dim: int = 768,
|
447 |
+
num_image_text_embeds: int = 32,
|
448 |
+
):
|
449 |
+
super().__init__()
|
450 |
+
|
451 |
+
self.num_image_text_embeds = num_image_text_embeds
|
452 |
+
self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim)
|
453 |
+
self.norm = nn.LayerNorm(cross_attention_dim)
|
454 |
+
|
455 |
+
def forward(self, image_embeds: torch.FloatTensor):
|
456 |
+
batch_size = image_embeds.shape[0]
|
457 |
+
|
458 |
+
# image
|
459 |
+
image_embeds = self.image_embeds(image_embeds)
|
460 |
+
image_embeds = image_embeds.reshape(batch_size, self.num_image_text_embeds, -1)
|
461 |
+
image_embeds = self.norm(image_embeds)
|
462 |
+
return image_embeds
|
463 |
+
|
464 |
+
|
465 |
+
class IPAdapterFullImageProjection(nn.Module):
|
466 |
+
def __init__(self, image_embed_dim=1024, cross_attention_dim=1024):
|
467 |
+
super().__init__()
|
468 |
+
from .attention import FeedForward
|
469 |
+
|
470 |
+
self.ff = FeedForward(image_embed_dim, cross_attention_dim, mult=1, activation_fn="gelu")
|
471 |
+
self.norm = nn.LayerNorm(cross_attention_dim)
|
472 |
+
|
473 |
+
def forward(self, image_embeds: torch.FloatTensor):
|
474 |
+
return self.norm(self.ff(image_embeds))
|
475 |
+
|
476 |
+
|
477 |
+
class CombinedTimestepLabelEmbeddings(nn.Module):
|
478 |
+
def __init__(self, num_classes, embedding_dim, class_dropout_prob=0.1):
|
479 |
+
super().__init__()
|
480 |
+
|
481 |
+
self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=1)
|
482 |
+
self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
|
483 |
+
self.class_embedder = LabelEmbedding(num_classes, embedding_dim, class_dropout_prob)
|
484 |
+
|
485 |
+
def forward(self, timestep, class_labels, hidden_dtype=None):
|
486 |
+
timesteps_proj = self.time_proj(timestep)
|
487 |
+
timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D)
|
488 |
+
|
489 |
+
class_labels = self.class_embedder(class_labels) # (N, D)
|
490 |
+
|
491 |
+
conditioning = timesteps_emb + class_labels # (N, D)
|
492 |
+
|
493 |
+
return conditioning
|
494 |
+
|
495 |
+
|
496 |
+
class TextTimeEmbedding(nn.Module):
|
497 |
+
def __init__(self, encoder_dim: int, time_embed_dim: int, num_heads: int = 64):
|
498 |
+
super().__init__()
|
499 |
+
self.norm1 = nn.LayerNorm(encoder_dim)
|
500 |
+
self.pool = AttentionPooling(num_heads, encoder_dim)
|
501 |
+
self.proj = nn.Linear(encoder_dim, time_embed_dim)
|
502 |
+
self.norm2 = nn.LayerNorm(time_embed_dim)
|
503 |
+
|
504 |
+
def forward(self, hidden_states):
|
505 |
+
hidden_states = self.norm1(hidden_states)
|
506 |
+
hidden_states = self.pool(hidden_states)
|
507 |
+
hidden_states = self.proj(hidden_states)
|
508 |
+
hidden_states = self.norm2(hidden_states)
|
509 |
+
return hidden_states
|
510 |
+
|
511 |
+
|
512 |
+
class TextImageTimeEmbedding(nn.Module):
|
513 |
+
def __init__(self, text_embed_dim: int = 768, image_embed_dim: int = 768, time_embed_dim: int = 1536):
|
514 |
+
super().__init__()
|
515 |
+
self.text_proj = nn.Linear(text_embed_dim, time_embed_dim)
|
516 |
+
self.text_norm = nn.LayerNorm(time_embed_dim)
|
517 |
+
self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
|
518 |
+
|
519 |
+
def forward(self, text_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor):
|
520 |
+
# text
|
521 |
+
time_text_embeds = self.text_proj(text_embeds)
|
522 |
+
time_text_embeds = self.text_norm(time_text_embeds)
|
523 |
+
|
524 |
+
# image
|
525 |
+
time_image_embeds = self.image_proj(image_embeds)
|
526 |
+
|
527 |
+
return time_image_embeds + time_text_embeds
|
528 |
+
|
529 |
+
|
530 |
+
class ImageTimeEmbedding(nn.Module):
|
531 |
+
def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536):
|
532 |
+
super().__init__()
|
533 |
+
self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
|
534 |
+
self.image_norm = nn.LayerNorm(time_embed_dim)
|
535 |
+
|
536 |
+
def forward(self, image_embeds: torch.FloatTensor):
|
537 |
+
# image
|
538 |
+
time_image_embeds = self.image_proj(image_embeds)
|
539 |
+
time_image_embeds = self.image_norm(time_image_embeds)
|
540 |
+
return time_image_embeds
|
541 |
+
|
542 |
+
|
543 |
+
class ImageHintTimeEmbedding(nn.Module):
|
544 |
+
def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536):
|
545 |
+
super().__init__()
|
546 |
+
self.image_proj = nn.Linear(image_embed_dim, time_embed_dim)
|
547 |
+
self.image_norm = nn.LayerNorm(time_embed_dim)
|
548 |
+
self.input_hint_block = nn.Sequential(
|
549 |
+
nn.Conv2d(3, 16, 3, padding=1),
|
550 |
+
nn.SiLU(),
|
551 |
+
nn.Conv2d(16, 16, 3, padding=1),
|
552 |
+
nn.SiLU(),
|
553 |
+
nn.Conv2d(16, 32, 3, padding=1, stride=2),
|
554 |
+
nn.SiLU(),
|
555 |
+
nn.Conv2d(32, 32, 3, padding=1),
|
556 |
+
nn.SiLU(),
|
557 |
+
nn.Conv2d(32, 96, 3, padding=1, stride=2),
|
558 |
+
nn.SiLU(),
|
559 |
+
nn.Conv2d(96, 96, 3, padding=1),
|
560 |
+
nn.SiLU(),
|
561 |
+
nn.Conv2d(96, 256, 3, padding=1, stride=2),
|
562 |
+
nn.SiLU(),
|
563 |
+
nn.Conv2d(256, 4, 3, padding=1),
|
564 |
+
)
|
565 |
+
|
566 |
+
def forward(self, image_embeds: torch.FloatTensor, hint: torch.FloatTensor):
|
567 |
+
# image
|
568 |
+
time_image_embeds = self.image_proj(image_embeds)
|
569 |
+
time_image_embeds = self.image_norm(time_image_embeds)
|
570 |
+
hint = self.input_hint_block(hint)
|
571 |
+
return time_image_embeds, hint
|
572 |
+
|
573 |
+
|
574 |
+
class AttentionPooling(nn.Module):
|
575 |
+
# Copied from https://github.com/deep-floyd/IF/blob/2f91391f27dd3c468bf174be5805b4cc92980c0b/deepfloyd_if/model/nn.py#L54
|
576 |
+
|
577 |
+
def __init__(self, num_heads, embed_dim, dtype=None):
|
578 |
+
super().__init__()
|
579 |
+
self.dtype = dtype
|
580 |
+
self.positional_embedding = nn.Parameter(torch.randn(1, embed_dim) / embed_dim**0.5)
|
581 |
+
self.k_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
|
582 |
+
self.q_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
|
583 |
+
self.v_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype)
|
584 |
+
self.num_heads = num_heads
|
585 |
+
self.dim_per_head = embed_dim // self.num_heads
|
586 |
+
|
587 |
+
def forward(self, x):
|
588 |
+
bs, length, width = x.size()
|
589 |
+
|
590 |
+
def shape(x):
|
591 |
+
# (bs, length, width) --> (bs, length, n_heads, dim_per_head)
|
592 |
+
x = x.view(bs, -1, self.num_heads, self.dim_per_head)
|
593 |
+
# (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
|
594 |
+
x = x.transpose(1, 2)
|
595 |
+
# (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head)
|
596 |
+
x = x.reshape(bs * self.num_heads, -1, self.dim_per_head)
|
597 |
+
# (bs*n_heads, length, dim_per_head) --> (bs*n_heads, dim_per_head, length)
|
598 |
+
x = x.transpose(1, 2)
|
599 |
+
return x
|
600 |
+
|
601 |
+
class_token = x.mean(dim=1, keepdim=True) + self.positional_embedding.to(x.dtype)
|
602 |
+
x = torch.cat([class_token, x], dim=1) # (bs, length+1, width)
|
603 |
+
|
604 |
+
# (bs*n_heads, class_token_length, dim_per_head)
|
605 |
+
q = shape(self.q_proj(class_token))
|
606 |
+
# (bs*n_heads, length+class_token_length, dim_per_head)
|
607 |
+
k = shape(self.k_proj(x))
|
608 |
+
v = shape(self.v_proj(x))
|
609 |
+
|
610 |
+
# (bs*n_heads, class_token_length, length+class_token_length):
|
611 |
+
scale = 1 / math.sqrt(math.sqrt(self.dim_per_head))
|
612 |
+
weight = torch.einsum("bct,bcs->bts", q * scale, k * scale) # More stable with f16 than dividing afterwards
|
613 |
+
weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
|
614 |
+
|
615 |
+
# (bs*n_heads, dim_per_head, class_token_length)
|
616 |
+
a = torch.einsum("bts,bcs->bct", weight, v)
|
617 |
+
|
618 |
+
# (bs, length+1, width)
|
619 |
+
a = a.reshape(bs, -1, 1).transpose(1, 2)
|
620 |
+
|
621 |
+
return a[:, 0, :] # cls_token
|
622 |
+
|
623 |
+
|
624 |
+
def get_fourier_embeds_from_boundingbox(embed_dim, box):
|
625 |
+
"""
|
626 |
+
Args:
|
627 |
+
embed_dim: int
|
628 |
+
box: a 3-D tensor [B x N x 4] representing the bounding boxes for GLIGEN pipeline
|
629 |
+
Returns:
|
630 |
+
[B x N x embed_dim] tensor of positional embeddings
|
631 |
+
"""
|
632 |
+
|
633 |
+
batch_size, num_boxes = box.shape[:2]
|
634 |
+
|
635 |
+
emb = 100 ** (torch.arange(embed_dim) / embed_dim)
|
636 |
+
emb = emb[None, None, None].to(device=box.device, dtype=box.dtype)
|
637 |
+
emb = emb * box.unsqueeze(-1)
|
638 |
+
|
639 |
+
emb = torch.stack((emb.sin(), emb.cos()), dim=-1)
|
640 |
+
emb = emb.permute(0, 1, 3, 4, 2).reshape(batch_size, num_boxes, embed_dim * 2 * 4)
|
641 |
+
|
642 |
+
return emb
|
643 |
+
|
644 |
+
|
645 |
+
class GLIGENTextBoundingboxProjection(nn.Module):
|
646 |
+
def __init__(self, positive_len, out_dim, feature_type="text-only", fourier_freqs=8):
|
647 |
+
super().__init__()
|
648 |
+
self.positive_len = positive_len
|
649 |
+
self.out_dim = out_dim
|
650 |
+
|
651 |
+
self.fourier_embedder_dim = fourier_freqs
|
652 |
+
self.position_dim = fourier_freqs * 2 * 4 # 2: sin/cos, 4: xyxy
|
653 |
+
|
654 |
+
if isinstance(out_dim, tuple):
|
655 |
+
out_dim = out_dim[0]
|
656 |
+
|
657 |
+
if feature_type == "text-only":
|
658 |
+
self.linears = nn.Sequential(
|
659 |
+
nn.Linear(self.positive_len + self.position_dim, 512),
|
660 |
+
nn.SiLU(),
|
661 |
+
nn.Linear(512, 512),
|
662 |
+
nn.SiLU(),
|
663 |
+
nn.Linear(512, out_dim),
|
664 |
+
)
|
665 |
+
self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
|
666 |
+
|
667 |
+
elif feature_type == "text-image":
|
668 |
+
self.linears_text = nn.Sequential(
|
669 |
+
nn.Linear(self.positive_len + self.position_dim, 512),
|
670 |
+
nn.SiLU(),
|
671 |
                nn.Linear(512, 512),
                nn.SiLU(),
                nn.Linear(512, out_dim),
            )
            self.linears_image = nn.Sequential(
                nn.Linear(self.positive_len + self.position_dim, 512),
                nn.SiLU(),
                nn.Linear(512, 512),
                nn.SiLU(),
                nn.Linear(512, out_dim),
            )
            self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
            self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))

        self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim]))

    def forward(
        self,
        boxes,
        masks,
        positive_embeddings=None,
        phrases_masks=None,
        image_masks=None,
        phrases_embeddings=None,
        image_embeddings=None,
    ):
        masks = masks.unsqueeze(-1)

        # embedding position (it may include padding as placeholder)
        xyxy_embedding = get_fourier_embeds_from_boundingbox(self.fourier_embedder_dim, boxes)  # B*N*4 -> B*N*C

        # learnable null embedding
        xyxy_null = self.null_position_feature.view(1, 1, -1)

        # replace padding with learnable null embedding
        xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null

        # positionet with text only information
        if positive_embeddings is not None:
            # learnable null embedding
            positive_null = self.null_positive_feature.view(1, 1, -1)

            # replace padding with learnable null embedding
            positive_embeddings = positive_embeddings * masks + (1 - masks) * positive_null

            objs = self.linears(torch.cat([positive_embeddings, xyxy_embedding], dim=-1))

        # positionet with text and image information
        else:
            phrases_masks = phrases_masks.unsqueeze(-1)
            image_masks = image_masks.unsqueeze(-1)

            # learnable null embedding
            text_null = self.null_text_feature.view(1, 1, -1)
            image_null = self.null_image_feature.view(1, 1, -1)

            # replace padding with learnable null embedding
            phrases_embeddings = phrases_embeddings * phrases_masks + (1 - phrases_masks) * text_null
            image_embeddings = image_embeddings * image_masks + (1 - image_masks) * image_null

            objs_text = self.linears_text(torch.cat([phrases_embeddings, xyxy_embedding], dim=-1))
            objs_image = self.linears_image(torch.cat([image_embeddings, xyxy_embedding], dim=-1))
            objs = torch.cat([objs_text, objs_image], dim=1)

        return objs


class PixArtAlphaCombinedTimestepSizeEmbeddings(nn.Module):
    """
    For PixArt-Alpha.

    Reference:
    https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L164C9-L168C29
    """

    def __init__(self, embedding_dim, size_emb_dim, use_additional_conditions: bool = False):
        super().__init__()

        self.outdim = size_emb_dim
        self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
        self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)

        self.use_additional_conditions = use_additional_conditions
        if use_additional_conditions:
            self.additional_condition_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
            self.resolution_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim)
            self.aspect_ratio_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim)

    def forward(self, timestep, resolution, aspect_ratio, batch_size, hidden_dtype):
        timesteps_proj = self.time_proj(timestep)
        timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype))  # (N, D)

        if self.use_additional_conditions:
            resolution_emb = self.additional_condition_proj(resolution.flatten()).to(hidden_dtype)
            resolution_emb = self.resolution_embedder(resolution_emb).reshape(batch_size, -1)
            aspect_ratio_emb = self.additional_condition_proj(aspect_ratio.flatten()).to(hidden_dtype)
            aspect_ratio_emb = self.aspect_ratio_embedder(aspect_ratio_emb).reshape(batch_size, -1)
            conditioning = timesteps_emb + torch.cat([resolution_emb, aspect_ratio_emb], dim=1)
        else:
            conditioning = timesteps_emb

        return conditioning


class PixArtAlphaTextProjection(nn.Module):
    """
    Projects caption embeddings. Also handles dropout for classifier-free guidance.

    Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py
    """

    def __init__(self, in_features, hidden_size, num_tokens=120):
        super().__init__()
        self.linear_1 = nn.Linear(in_features=in_features, out_features=hidden_size, bias=True)
        self.act_1 = nn.GELU(approximate="tanh")
        self.linear_2 = nn.Linear(in_features=hidden_size, out_features=hidden_size, bias=True)

    def forward(self, caption):
        hidden_states = self.linear_1(caption)
        hidden_states = self.act_1(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states


class IPAdapterPlusImageProjection(nn.Module):
    """Resampler of IP-Adapter Plus.

    Args:
    ----
        embed_dims (int): The feature dimension. Defaults to 768.
        output_dims (int): The number of output channels, that is the same
            number of the channels in the `unet.config.cross_attention_dim`. Defaults to 1024.
        hidden_dims (int): The number of hidden channels. Defaults to 1280.
        depth (int): The number of blocks. Defaults to 8.
        dim_head (int): The number of head channels. Defaults to 64.
        heads (int): Parallel attention heads. Defaults to 16.
        num_queries (int): The number of queries. Defaults to 8.
        ffn_ratio (float): The expansion ratio of feedforward network hidden
            layer channels. Defaults to 4.
    """

    def __init__(
        self,
        embed_dims: int = 768,
        output_dims: int = 1024,
        hidden_dims: int = 1280,
        depth: int = 4,
        dim_head: int = 64,
        heads: int = 16,
        num_queries: int = 8,
        ffn_ratio: float = 4,
    ) -> None:
        super().__init__()
        from .attention import FeedForward  # Lazy import to avoid circular import

        self.latents = nn.Parameter(torch.randn(1, num_queries, hidden_dims) / hidden_dims**0.5)

        self.proj_in = nn.Linear(embed_dims, hidden_dims)

        self.proj_out = nn.Linear(hidden_dims, output_dims)
        self.norm_out = nn.LayerNorm(output_dims)

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        nn.LayerNorm(hidden_dims),
                        nn.LayerNorm(hidden_dims),
                        Attention(
                            query_dim=hidden_dims,
                            dim_head=dim_head,
                            heads=heads,
                            out_bias=False,
                        ),
                        nn.Sequential(
                            nn.LayerNorm(hidden_dims),
                            FeedForward(hidden_dims, hidden_dims, activation_fn="gelu", mult=ffn_ratio, bias=False),
                        ),
                    ]
                )
            )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass.

        Args:
        ----
            x (torch.Tensor): Input Tensor.

        Returns:
        -------
            torch.Tensor: Output Tensor.
        """
        latents = self.latents.repeat(x.size(0), 1, 1)

        x = self.proj_in(x)

        for ln0, ln1, attn, ff in self.layers:
            residual = latents

            encoder_hidden_states = ln0(x)
            latents = ln1(latents)
            encoder_hidden_states = torch.cat([encoder_hidden_states, latents], dim=-2)
            latents = attn(latents, encoder_hidden_states) + residual
            latents = ff(latents) + latents

        latents = self.proj_out(latents)
        return self.norm_out(latents)
diffusers/models/embeddings_flax.py
ADDED
@@ -0,0 +1,97 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor).

    Args:
        timesteps: a 1-D Tensor of N indices, one per batch element.
            These may be fractional.
        embedding_dim: The number of output channels.
        min_timescale: The smallest time unit (should probably be 0.0).
        max_timescale: The largest time unit.
    Returns:
        a Tensor of timing signals [N, num_channels]
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    r"""
    Time step Embedding Module. Learns embeddings for input time steps.

    Args:
        time_embed_dim (`int`, *optional*, defaults to `32`):
            Time step embedding dimension
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    r"""
    Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239

    Args:
        dim (`int`, *optional*, defaults to `32`):
            Time step embedding dimension
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
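A minimal sketch of how the Flax time-step helpers above fit together (the timestep values and dimensions are illustrative assumptions):

import jax
import jax.numpy as jnp
from diffusers.models.embeddings_flax import FlaxTimestepEmbedding, get_sinusoidal_embeddings

t = jnp.array([0.0, 10.0, 500.0])                       # three diffusion timesteps
sin_emb = get_sinusoidal_embeddings(t, embedding_dim=32, flip_sin_to_cos=True)   # shape (3, 32)

mlp = FlaxTimestepEmbedding(time_embed_dim=128)
params = mlp.init(jax.random.PRNGKey(0), sin_emb)       # the two Dense layers are created lazily
temb = mlp.apply(params, sin_emb)                        # (3, 128) conditioning vectors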
diffusers/models/lora.py
ADDED
@@ -0,0 +1,434 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# IMPORTANT:                                                      #
###################################################################
# ----------------------------------------------------------------#
# This file is deprecated and will be removed soon                #
# (as soon as PEFT will become a required dependency for LoRA)    #
# ----------------------------------------------------------------#
###################################################################

from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import logging
from ..utils.import_utils import is_transformers_available


if is_transformers_available():
    from transformers import CLIPTextModel, CLIPTextModelWithProjection


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def text_encoder_attn_modules(text_encoder):
    attn_modules = []

    if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        for i, layer in enumerate(text_encoder.text_model.encoder.layers):
            name = f"text_model.encoder.layers.{i}.self_attn"
            mod = layer.self_attn
            attn_modules.append((name, mod))
    else:
        raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")

    return attn_modules


def text_encoder_mlp_modules(text_encoder):
    mlp_modules = []

    if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        for i, layer in enumerate(text_encoder.text_model.encoder.layers):
            mlp_mod = layer.mlp
            name = f"text_model.encoder.layers.{i}.mlp"
            mlp_modules.append((name, mlp_mod))
    else:
        raise ValueError(f"do not know how to get mlp modules for: {text_encoder.__class__.__name__}")

    return mlp_modules


def adjust_lora_scale_text_encoder(text_encoder, lora_scale: float = 1.0):
    for _, attn_module in text_encoder_attn_modules(text_encoder):
        if isinstance(attn_module.q_proj, PatchedLoraProjection):
            attn_module.q_proj.lora_scale = lora_scale
            attn_module.k_proj.lora_scale = lora_scale
            attn_module.v_proj.lora_scale = lora_scale
            attn_module.out_proj.lora_scale = lora_scale

    for _, mlp_module in text_encoder_mlp_modules(text_encoder):
        if isinstance(mlp_module.fc1, PatchedLoraProjection):
            mlp_module.fc1.lora_scale = lora_scale
            mlp_module.fc2.lora_scale = lora_scale


class PatchedLoraProjection(torch.nn.Module):
    def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None):
        super().__init__()
        from ..models.lora import LoRALinearLayer

        self.regular_linear_layer = regular_linear_layer

        device = self.regular_linear_layer.weight.device

        if dtype is None:
            dtype = self.regular_linear_layer.weight.dtype

        self.lora_linear_layer = LoRALinearLayer(
            self.regular_linear_layer.in_features,
            self.regular_linear_layer.out_features,
            network_alpha=network_alpha,
            device=device,
            dtype=dtype,
            rank=rank,
        )

        self.lora_scale = lora_scale

    # overwrite PyTorch's `state_dict` to be sure that only the 'regular_linear_layer' weights are saved
    # when saving the whole text encoder model and when LoRA is unloaded or fused
    def state_dict(self, *args, destination=None, prefix="", keep_vars=False):
        if self.lora_linear_layer is None:
            return self.regular_linear_layer.state_dict(
                *args, destination=destination, prefix=prefix, keep_vars=keep_vars
            )

        return super().state_dict(*args, destination=destination, prefix=prefix, keep_vars=keep_vars)

    def _fuse_lora(self, lora_scale=1.0, safe_fusing=False):
        if self.lora_linear_layer is None:
            return

        dtype, device = self.regular_linear_layer.weight.data.dtype, self.regular_linear_layer.weight.data.device

        w_orig = self.regular_linear_layer.weight.data.float()
        w_up = self.lora_linear_layer.up.weight.data.float()
        w_down = self.lora_linear_layer.down.weight.data.float()

        if self.lora_linear_layer.network_alpha is not None:
            w_up = w_up * self.lora_linear_layer.network_alpha / self.lora_linear_layer.rank

        fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])

        if safe_fusing and torch.isnan(fused_weight).any().item():
            raise ValueError(
                "This LoRA weight seems to be broken. "
                f"Encountered NaN values when trying to fuse LoRA weights for {self}."
                "LoRA weights will not be fused."
            )

        self.regular_linear_layer.weight.data = fused_weight.to(device=device, dtype=dtype)

        # we can drop the lora layer now
        self.lora_linear_layer = None

        # offload the up and down matrices to CPU to not blow the memory
        self.w_up = w_up.cpu()
        self.w_down = w_down.cpu()
        self.lora_scale = lora_scale

    def _unfuse_lora(self):
        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
            return

        fused_weight = self.regular_linear_layer.weight.data
        dtype, device = fused_weight.dtype, fused_weight.device

        w_up = self.w_up.to(device=device).float()
        w_down = self.w_down.to(device).float()

        unfused_weight = fused_weight.float() - (self.lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])
        self.regular_linear_layer.weight.data = unfused_weight.to(device=device, dtype=dtype)

        self.w_up = None
        self.w_down = None

    def forward(self, input):
        if self.lora_scale is None:
            self.lora_scale = 1.0
        if self.lora_linear_layer is None:
            return self.regular_linear_layer(input)
        return self.regular_linear_layer(input) + (self.lora_scale * self.lora_linear_layer(input))


class LoRALinearLayer(nn.Module):
    r"""
    A linear layer that is used with LoRA.

    Parameters:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        rank (`int`, `optional`, defaults to 4):
            The rank of the LoRA layer.
        network_alpha (`float`, `optional`, defaults to `None`):
            The value of the network alpha used for stable learning and preventing underflow. This value has the same
            meaning as the `--network_alpha` option in the kohya-ss trainer script. See
            https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        device (`torch.device`, `optional`, defaults to `None`):
            The device to use for the layer's weights.
        dtype (`torch.dtype`, `optional`, defaults to `None`):
            The dtype to use for the layer's weights.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        rank: int = 4,
        network_alpha: Optional[float] = None,
        device: Optional[Union[torch.device, str]] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        super().__init__()

        self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype)
        self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype)
        # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
        # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        self.network_alpha = network_alpha
        self.rank = rank
        self.out_features = out_features
        self.in_features = in_features

        nn.init.normal_(self.down.weight, std=1 / rank)
        nn.init.zeros_(self.up.weight)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        orig_dtype = hidden_states.dtype
        dtype = self.down.weight.dtype

        down_hidden_states = self.down(hidden_states.to(dtype))
        up_hidden_states = self.up(down_hidden_states)

        if self.network_alpha is not None:
            up_hidden_states *= self.network_alpha / self.rank

        return up_hidden_states.to(orig_dtype)


class LoRAConv2dLayer(nn.Module):
    r"""
    A convolutional layer that is used with LoRA.

    Parameters:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        rank (`int`, `optional`, defaults to 4):
            The rank of the LoRA layer.
        kernel_size (`int` or `tuple` of two `int`, `optional`, defaults to 1):
            The kernel size of the convolution.
        stride (`int` or `tuple` of two `int`, `optional`, defaults to 1):
            The stride of the convolution.
        padding (`int` or `tuple` of two `int` or `str`, `optional`, defaults to 0):
            The padding of the convolution.
        network_alpha (`float`, `optional`, defaults to `None`):
            The value of the network alpha used for stable learning and preventing underflow. This value has the same
            meaning as the `--network_alpha` option in the kohya-ss trainer script. See
            https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        rank: int = 4,
        kernel_size: Union[int, Tuple[int, int]] = (1, 1),
        stride: Union[int, Tuple[int, int]] = (1, 1),
        padding: Union[int, Tuple[int, int], str] = 0,
        network_alpha: Optional[float] = None,
    ):
        super().__init__()

        self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        # according to the official kohya_ss trainer kernel_size are always fixed for the up layer
        # # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129
        self.up = nn.Conv2d(rank, out_features, kernel_size=(1, 1), stride=(1, 1), bias=False)

        # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
        # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        self.network_alpha = network_alpha
        self.rank = rank

        nn.init.normal_(self.down.weight, std=1 / rank)
        nn.init.zeros_(self.up.weight)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        orig_dtype = hidden_states.dtype
        dtype = self.down.weight.dtype

        down_hidden_states = self.down(hidden_states.to(dtype))
        up_hidden_states = self.up(down_hidden_states)

        if self.network_alpha is not None:
            up_hidden_states *= self.network_alpha / self.rank

        return up_hidden_states.to(orig_dtype)


class LoRACompatibleConv(nn.Conv2d):
    """
    A convolutional layer that can be used with LoRA.
    """

    def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs):
        super().__init__(*args, **kwargs)
        self.lora_layer = lora_layer

    def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):
        self.lora_layer = lora_layer

    def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):
        if self.lora_layer is None:
            return

        dtype, device = self.weight.data.dtype, self.weight.data.device

        w_orig = self.weight.data.float()
        w_up = self.lora_layer.up.weight.data.float()
        w_down = self.lora_layer.down.weight.data.float()

        if self.lora_layer.network_alpha is not None:
            w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank

        fusion = torch.mm(w_up.flatten(start_dim=1), w_down.flatten(start_dim=1))
        fusion = fusion.reshape((w_orig.shape))
        fused_weight = w_orig + (lora_scale * fusion)

        if safe_fusing and torch.isnan(fused_weight).any().item():
            raise ValueError(
                "This LoRA weight seems to be broken. "
                f"Encountered NaN values when trying to fuse LoRA weights for {self}."
                "LoRA weights will not be fused."
            )

        self.weight.data = fused_weight.to(device=device, dtype=dtype)

        # we can drop the lora layer now
        self.lora_layer = None

        # offload the up and down matrices to CPU to not blow the memory
        self.w_up = w_up.cpu()
        self.w_down = w_down.cpu()
        self._lora_scale = lora_scale

    def _unfuse_lora(self):
        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
            return

        fused_weight = self.weight.data
        dtype, device = fused_weight.data.dtype, fused_weight.data.device

        self.w_up = self.w_up.to(device=device).float()
        self.w_down = self.w_down.to(device).float()

        fusion = torch.mm(self.w_up.flatten(start_dim=1), self.w_down.flatten(start_dim=1))
        fusion = fusion.reshape((fused_weight.shape))
        unfused_weight = fused_weight.float() - (self._lora_scale * fusion)
        self.weight.data = unfused_weight.to(device=device, dtype=dtype)

        self.w_up = None
        self.w_down = None

    def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        if self.lora_layer is None:
            # make sure to the functional Conv2D function as otherwise torch.compile's graph will break
            # see: https://github.com/huggingface/diffusers/pull/4315
            return F.conv2d(
                hidden_states, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups
            )
        else:
            original_outputs = F.conv2d(
                hidden_states, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups
            )
            return original_outputs + (scale * self.lora_layer(hidden_states))


class LoRACompatibleLinear(nn.Linear):
    """
    A Linear layer that can be used with LoRA.
    """

    def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):
        super().__init__(*args, **kwargs)
        self.lora_layer = lora_layer

    def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]):
        self.lora_layer = lora_layer

    def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):
        if self.lora_layer is None:
            return

        dtype, device = self.weight.data.dtype, self.weight.data.device

        w_orig = self.weight.data.float()
        w_up = self.lora_layer.up.weight.data.float()
        w_down = self.lora_layer.down.weight.data.float()

        if self.lora_layer.network_alpha is not None:
            w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank

        fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])

        if safe_fusing and torch.isnan(fused_weight).any().item():
            raise ValueError(
                "This LoRA weight seems to be broken. "
                f"Encountered NaN values when trying to fuse LoRA weights for {self}."
                "LoRA weights will not be fused."
            )

        self.weight.data = fused_weight.to(device=device, dtype=dtype)

        # we can drop the lora layer now
        self.lora_layer = None

        # offload the up and down matrices to CPU to not blow the memory
        self.w_up = w_up.cpu()
        self.w_down = w_down.cpu()
        self._lora_scale = lora_scale

    def _unfuse_lora(self):
        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
            return

        fused_weight = self.weight.data
        dtype, device = fused_weight.dtype, fused_weight.device

        w_up = self.w_up.to(device=device).float()
        w_down = self.w_down.to(device).float()

        unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])
        self.weight.data = unfused_weight.to(device=device, dtype=dtype)

        self.w_up = None
        self.w_down = None

    def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        if self.lora_layer is None:
            out = super().forward(hidden_states)
            return out
        else:
            out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states))
            return out
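The LoRA wrappers above follow a common pattern: the base layer behaves like a plain nn.Linear or nn.Conv2d until a low-rank layer is attached, and `_fuse_lora` folds the update into the base weight. A minimal sketch (dimensions are illustrative assumptions):

import torch
from diffusers.models.lora import LoRACompatibleLinear, LoRALinearLayer

base = LoRACompatibleLinear(320, 640)                   # plain linear while lora_layer is None
base.set_lora_layer(LoRALinearLayer(320, 640, rank=4, network_alpha=4))

x = torch.randn(2, 320)
y = base(x, scale=0.8)                                  # base output + 0.8 * up(down(x))

base._fuse_lora(lora_scale=0.8)                         # bakes the low-rank update into base.weight
y_fused = base(x)                                       # plain linear forward from now on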
diffusers/models/modeling_flax_pytorch_utils.py
ADDED
@@ -0,0 +1,134 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - Flax general utilities."""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


#####################
# PyTorch => Flax #
#####################


# Adapted from https://github.com/huggingface/transformers/blob/c603c80f46881ae18b2ca50770ef65fa4033eacd/src/transformers/modeling_flax_pytorch_utils.py#L69
# and https://github.com/patil-suraj/stable-diffusion-jax/blob/main/stable_diffusion_jax/convert_diffusers_to_jax.py
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)

    # rename attention layers
    if len(pt_tuple_key) > 1:
        for rename_from, rename_to in (
            ("to_out_0", "proj_attn"),
            ("to_k", "key"),
            ("to_v", "value"),
            ("to_q", "query"),
        ):
            if pt_tuple_key[-2] == rename_from:
                weight_name = pt_tuple_key[-1]
                weight_name = "kernel" if weight_name == "weight" else weight_name
                renamed_pt_tuple_key = pt_tuple_key[:-2] + (rename_to, weight_name)
                if renamed_pt_tuple_key in random_flax_state_dict:
                    assert random_flax_state_dict[renamed_pt_tuple_key].shape == pt_tensor.T.shape
                    return renamed_pt_tuple_key, pt_tensor.T

    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
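The key step in the PyTorch-to-Flax conversion above is renaming each weight and transposing it into the layout Flax expects. A small sketch of `rename_key_and_reshape_tensor` applied to a convolution kernel (the tensor and key are illustrative assumptions):

import numpy as np
from diffusers.models.modeling_flax_pytorch_utils import rename_key_and_reshape_tensor

pt_key = ("conv_in", "weight")
pt_tensor = np.zeros((320, 4, 3, 3))                    # PyTorch conv weight in (out, in, kH, kW) layout
flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_key, pt_tensor, random_flax_state_dict={})
print(flax_key)                                         # ('conv_in', 'kernel')
print(flax_tensor.shape)                                # (3, 3, 4, 320): HWIO layout expected by Flax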
diffusers/models/modeling_flax_utils.py
ADDED
@@ -0,0 +1,566 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from pickle import UnpicklingError
from typing import Any, Dict, Union

import jax
import jax.numpy as jnp
import msgpack.exceptions
from flax.core.frozen_dict import FrozenDict, unfreeze
from flax.serialization import from_bytes, to_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
from huggingface_hub import create_repo, hf_hub_download
from huggingface_hub.utils import (
    EntryNotFoundError,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    validate_hf_hub_args,
)
from requests import HTTPError

from .. import __version__, is_torch_available
from ..utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    WEIGHTS_NAME,
    PushToHubMixin,
    logging,
)
from .modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax


logger = logging.get_logger(__name__)


class FlaxModelMixin(PushToHubMixin):
    r"""
    Base class for all Flax models.

    [`FlaxModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and
    saving models.

        - **config_name** ([`str`]) -- Filename to save a model to when calling [`~FlaxModelMixin.save_pretrained`].
    """

    config_name = CONFIG_NAME
    _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"]
    _flax_internal_args = ["name", "parent", "dtype"]

    @classmethod
    def _from_config(cls, config, **kwargs):
        """
        All context managers that the model should be initialized under go here.
        """
        return cls(config, **kwargs)

    def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:
        """
        Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`.
        """

        # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27
        def conditional_cast(param):
            if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):
                param = param.astype(dtype)
            return param

        if mask is None:
            return jax.tree_map(conditional_cast, params)

        flat_params = flatten_dict(params)
        flat_mask, _ = jax.tree_flatten(mask)

        for masked, key in zip(flat_mask, flat_params.keys()):
            if masked:
                param = flat_params[key]
                flat_params[key] = conditional_cast(param)

        return unflatten_dict(flat_params)

    def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):
        r"""
        Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast
        the `params` in place.

        This method can be used on a TPU to explicitly convert the model parameters to bfloat16 precision to do full
        half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.

        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`
                for params you want to cast, and `False` for those you want to skip.

        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel

        >>> # load model
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision
        >>> params = model.to_bf16(params)
        >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
        >>> # then pass the mask as follows
        >>> from flax import traverse_util

        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> flat_params = traverse_util.flatten_dict(params)
        >>> mask = {
        ...     path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
        ...     for path in flat_params
        ... }
        >>> mask = traverse_util.unflatten_dict(mask)
        >>> params = model.to_bf16(params, mask)
        ```"""
        return self._cast_floating_to(params, jnp.bfloat16, mask)

    def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):
        r"""
        Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the
        model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.

        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`
                for params you want to cast, and `False` for those you want to skip.

        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel

        >>> # Download model and configuration from huggingface.co
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> # By default, the model params will be in fp32, to illustrate the use of this method,
        >>> # we'll first cast to fp16 and back to fp32
        >>> params = model.to_fp16(params)
        >>> # now cast back to fp32
        >>> params = model.to_fp32(params)
        ```"""
        return self._cast_floating_to(params, jnp.float32, mask)

    def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):
        r"""
        Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the
        `params` in place.

        This method can be used on a GPU to explicitly convert the model parameters to float16 precision to do full
        half-precision training or to save weights in float16 for inference in order to save memory and improve speed.

        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True`
                for params you want to cast, and `False` for those you want to skip.

        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel

        >>> # load model
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> # By default, the model params will be in fp32, to cast these to float16
        >>> params = model.to_fp16(params)
        >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
        >>> # then pass the mask as follows
        >>> from flax import traverse_util

        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> flat_params = traverse_util.flatten_dict(params)
        >>> mask = {
        ...     path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
        ...     for path in flat_params
        ... }
        >>> mask = traverse_util.unflatten_dict(mask)
        >>> params = model.to_fp16(params, mask)
        ```"""
        return self._cast_floating_to(params, jnp.float16, mask)

    def init_weights(self, rng: jax.Array) -> Dict:
        raise NotImplementedError(f"init_weights method has to be implemented for {self}")

    @classmethod
    @validate_hf_hub_args
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Union[str, os.PathLike],
        dtype: jnp.dtype = jnp.float32,
        *model_args,
        **kwargs,
    ):
        r"""
        Instantiate a pretrained Flax model from a pretrained model configuration.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                    - A string, the *model id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained model
                      hosted on the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      using [`~FlaxModelMixin.save_pretrained`].
            dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
                The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
                `jax.numpy.bfloat16` (on TPUs).

                This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
                specified, all the computation will be performed with the given `dtype`.

                <Tip>

                This only specifies the dtype of the *computation* and does not influence the dtype of model
                parameters.

                If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`] and
                [`~FlaxModelMixin.to_bf16`].

                </Tip>

            model_args (sequence of positional arguments, *optional*):
                All remaining positional arguments are passed to the underlying model's `__init__` method.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only(`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            from_pt (`bool`, *optional*, defaults to `False`):
                Load the model weights from a PyTorch checkpoint save file.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it is loaded) and initiate the model (for
                example, `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `kwargs` are directly passed to the underlying
                      model's `__init__` method (we assume all relevant updates to the configuration have already been
                      done).
                    - If a configuration is not provided, `kwargs` are first passed to the configuration class
                      initialization function [`~ConfigMixin.from_config`]. Each key of the `kwargs` that corresponds
                      to a configuration attribute is used to override said attribute with the supplied `kwargs` value.
                      Remaining keys that do not correspond to any configuration attribute are passed to the underlying
                      model's `__init__` function.

        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("./test/saved_model/")
        ```

        If you get the error message below, you need to finetune the weights for your downstream task:

        ```bash
        Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
        - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
        You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
        ```
        """
        config = kwargs.pop("config", None)
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        from_pt = kwargs.pop("from_pt", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)

        user_agent = {
            "diffusers": __version__,
            "file_type": "model",
            "framework": "flax",
        }

        # Load config if we don't provide one
        if config is None:
            config, unused_kwargs = cls.load_config(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                token=token,
                revision=revision,
                subfolder=subfolder,
                **kwargs,
            )

        model, model_kwargs = cls.from_config(config, dtype=dtype, return_unused_kwargs=True, **unused_kwargs)

        # Load model
        pretrained_path_with_subfolder = (
            pretrained_model_name_or_path
            if subfolder is None
            else os.path.join(pretrained_model_name_or_path, subfolder)
        )
        if os.path.isdir(pretrained_path_with_subfolder):
            if from_pt:
                if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):
                    raise EnvironmentError(
                        f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} "
                    )
                model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)
            elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)):
                # Load from a Flax checkpoint
                model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)
            # Check if pytorch weights exist instead
            elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):
                raise EnvironmentError(
                    f"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. Please load the model"
                    " using `from_pt=True`."
                )
            else:
                raise EnvironmentError(
                    f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory "
                    f"{pretrained_path_with_subfolder}."
                )
        else:
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    token=token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision,
                )

            except RepositoryNotFoundError:
                raise EnvironmentError(
                    f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                    "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                    "token having permission to this repo with `token` or log in with `huggingface-cli "
                    "login`."
                )
            except RevisionNotFoundError:
                raise EnvironmentError(
                    f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                    "this model name. Check the model page at "
                    f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
                )
            except EntryNotFoundError:
                raise EnvironmentError(
                    f"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}."
                )
            except HTTPError as err:
                raise EnvironmentError(
                    f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n"
                    f"{err}"
                )
            except ValueError:
                raise EnvironmentError(
                    f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                    f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                    f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheckout your"
                    " internet connection or see how to run the library in offline mode at"
                    " 'https://huggingface.co/docs/transformers/installation#offline-mode'."
                )
            except EnvironmentError:
                raise EnvironmentError(
                    f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                    "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                    f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                    f"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}."
                )

        if from_pt:
            if is_torch_available():
                from .modeling_utils import load_state_dict
            else:
                raise EnvironmentError(
                    "Can't load the model in PyTorch format because PyTorch is not installed. "
                    "Please, install PyTorch or use native Flax weights."
                )

            # Step 1: Get the pytorch file
            pytorch_model_file = load_state_dict(model_file)

            # Step 2: Convert the weights
            state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model)
        else:
            try:
                with open(model_file, "rb") as state_f:
                    state = from_bytes(cls, state_f.read())
            except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
                try:
                    with open(model_file) as f:
                        if f.read().startswith("version"):
                            raise OSError(
                                "You seem to have cloned a repository without having git-lfs installed. Please"
                                " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                                " folder you cloned."
                            )
                        else:
                            raise ValueError from e
                except (UnicodeDecodeError, ValueError):
                    raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
|
441 |
+
raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
|
442 |
+
# make sure all arrays are stored as jnp.ndarray
|
443 |
+
# NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4:
|
444 |
+
# https://github.com/google/flax/issues/1261
|
445 |
+
state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend="cpu")[0]), state)
|
446 |
+
|
447 |
+
# flatten dicts
|
448 |
+
state = flatten_dict(state)
|
449 |
+
|
450 |
+
params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0))
|
451 |
+
required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys())
|
452 |
+
|
453 |
+
shape_state = flatten_dict(unfreeze(params_shape_tree))
|
454 |
+
|
455 |
+
missing_keys = required_params - set(state.keys())
|
456 |
+
unexpected_keys = set(state.keys()) - required_params
|
457 |
+
|
458 |
+
if missing_keys:
|
459 |
+
logger.warning(
|
460 |
+
f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. "
|
461 |
+
"Make sure to call model.init_weights to initialize the missing weights."
|
462 |
+
)
|
463 |
+
cls._missing_keys = missing_keys
|
464 |
+
|
465 |
+
for key in state.keys():
|
466 |
+
if key in shape_state and state[key].shape != shape_state[key].shape:
|
467 |
+
raise ValueError(
|
468 |
+
f"Trying to load the pretrained weight for {key} failed: checkpoint has shape "
|
469 |
+
f"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. "
|
470 |
+
)
|
471 |
+
|
472 |
+
# remove unexpected keys to not be saved again
|
473 |
+
for unexpected_key in unexpected_keys:
|
474 |
+
del state[unexpected_key]
|
475 |
+
|
476 |
+
if len(unexpected_keys) > 0:
|
477 |
+
logger.warning(
|
478 |
+
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
|
479 |
+
f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
|
480 |
+
f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
|
481 |
+
" with another architecture."
|
482 |
+
)
|
483 |
+
else:
|
484 |
+
logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
|
485 |
+
|
486 |
+
if len(missing_keys) > 0:
|
487 |
+
logger.warning(
|
488 |
+
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
|
489 |
+
f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
|
490 |
+
" TRAIN this model on a down-stream task to be able to use it for predictions and inference."
|
491 |
+
)
|
492 |
+
else:
|
493 |
+
logger.info(
|
494 |
+
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
|
495 |
+
f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
|
496 |
+
f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
|
497 |
+
" training."
|
498 |
+
)
|
499 |
+
|
500 |
+
return model, unflatten_dict(state)
|
501 |
+
|
502 |
+
def save_pretrained(
|
503 |
+
self,
|
504 |
+
save_directory: Union[str, os.PathLike],
|
505 |
+
params: Union[Dict, FrozenDict],
|
506 |
+
is_main_process: bool = True,
|
507 |
+
push_to_hub: bool = False,
|
508 |
+
**kwargs,
|
509 |
+
):
|
510 |
+
"""
|
511 |
+
Save a model and its configuration file to a directory so that it can be reloaded using the
|
512 |
+
[`~FlaxModelMixin.from_pretrained`] class method.
|
513 |
+
|
514 |
+
Arguments:
|
515 |
+
save_directory (`str` or `os.PathLike`):
|
516 |
+
Directory to save a model and its configuration file to. Will be created if it doesn't exist.
|
517 |
+
params (`Union[Dict, FrozenDict]`):
|
518 |
+
A `PyTree` of model parameters.
|
519 |
+
is_main_process (`bool`, *optional*, defaults to `True`):
|
520 |
+
Whether the process calling this is the main process or not. Useful during distributed training and you
|
521 |
+
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
|
522 |
+
process to avoid race conditions.
|
523 |
+
push_to_hub (`bool`, *optional*, defaults to `False`):
|
524 |
+
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
|
525 |
+
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
|
526 |
+
namespace).
|
527 |
+
kwargs (`Dict[str, Any]`, *optional*):
|
528 |
+
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
|
529 |
+
"""
|
530 |
+
if os.path.isfile(save_directory):
|
531 |
+
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
|
532 |
+
return
|
533 |
+
|
534 |
+
os.makedirs(save_directory, exist_ok=True)
|
535 |
+
|
536 |
+
if push_to_hub:
|
537 |
+
commit_message = kwargs.pop("commit_message", None)
|
538 |
+
private = kwargs.pop("private", False)
|
539 |
+
create_pr = kwargs.pop("create_pr", False)
|
540 |
+
token = kwargs.pop("token", None)
|
541 |
+
repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
|
542 |
+
repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id
|
543 |
+
|
544 |
+
model_to_save = self
|
545 |
+
|
546 |
+
# Attach architecture to the config
|
547 |
+
# Save the config
|
548 |
+
if is_main_process:
|
549 |
+
model_to_save.save_config(save_directory)
|
550 |
+
|
551 |
+
# save model
|
552 |
+
output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME)
|
553 |
+
with open(output_model_file, "wb") as f:
|
554 |
+
model_bytes = to_bytes(params)
|
555 |
+
f.write(model_bytes)
|
556 |
+
|
557 |
+
logger.info(f"Model weights saved in {output_model_file}")
|
558 |
+
|
559 |
+
if push_to_hub:
|
560 |
+
self._upload_folder(
|
561 |
+
save_directory,
|
562 |
+
repo_id,
|
563 |
+
token=token,
|
564 |
+
commit_message=commit_message,
|
565 |
+
create_pr=create_pr,
|
566 |
+
)
|
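For context (not part of the diff): a minimal sketch of how the two methods above are typically driven from a concrete subclass such as `FlaxUNet2DConditionModel`, assuming a Flax-capable install of `diffusers`, `jax`, and `flax`; the checkpoint id, subfolder, and output path are placeholders.

```python
# Minimal usage sketch for FlaxModelMixin.from_pretrained / save_pretrained.
from diffusers import FlaxUNet2DConditionModel

# from_pretrained returns the model definition and a separate params PyTree.
# from_pt=True converts a PyTorch checkpoint on the fly, as implemented above.
model, params = FlaxUNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet", from_pt=True
)

# save_pretrained writes the config plus a msgpack weights file (FLAX_WEIGHTS_NAME)
# into the target directory.
model.save_pretrained("./unet_flax", params=params)

# Reloading from the local directory picks up the native Flax checkpoint.
model, params = FlaxUNet2DConditionModel.from_pretrained("./unet_flax")
```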
diffusers/models/modeling_outputs.py
ADDED
@@ -0,0 +1,17 @@
+from dataclasses import dataclass
+
+from ..utils import BaseOutput
+
+
+@dataclass
+class AutoencoderKLOutput(BaseOutput):
+    """
+    Output of AutoencoderKL encoding method.
+
+    Args:
+        latent_dist (`DiagonalGaussianDistribution`):
+            Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
+            `DiagonalGaussianDistribution` allows for sampling latents from the distribution.
+    """
+
+    latent_dist: "DiagonalGaussianDistribution"  # noqa: F821
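For context (not part of the diff): a minimal sketch of how this output dataclass is consumed, assuming a standard `diffusers` + `torch` install; the checkpoint id and random input batch are placeholders.

```python
# AutoencoderKL.encode returns AutoencoderKLOutput; latents come from latent_dist.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
images = torch.randn(1, 3, 256, 256)  # placeholder image batch in [-1, 1]

out = vae.encode(images)            # -> AutoencoderKLOutput
latents = out.latent_dist.sample()  # stochastic sample from the diagonal Gaussian
mode = out.latent_dist.mode()       # or the deterministic mode of the distribution
```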
diffusers/models/modeling_pytorch_flax_utils.py
ADDED
@@ -0,0 +1,161 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch - Flax general utilities."""
+
+from pickle import UnpicklingError
+
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.serialization import from_bytes
+from flax.traverse_util import flatten_dict
+
+from ..utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+#####################
+# Flax => PyTorch #
+#####################
+
+
+# from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_flax_pytorch_utils.py#L224-L352
+def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
+    try:
+        with open(model_file, "rb") as flax_state_f:
+            flax_state = from_bytes(None, flax_state_f.read())
+    except UnpicklingError as e:
+        try:
+            with open(model_file) as f:
+                if f.read().startswith("version"):
+                    raise OSError(
+                        "You seem to have cloned a repository without having git-lfs installed. Please"
+                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
+                        " folder you cloned."
+                    )
+                else:
+                    raise ValueError from e
+        except (UnicodeDecodeError, ValueError):
+            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
+
+    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
+
+
+def load_flax_weights_in_pytorch_model(pt_model, flax_state):
+    """Load flax checkpoints in a PyTorch model"""
+
+    try:
+        import torch  # noqa: F401
+    except ImportError:
+        logger.error(
+            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
+            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
+            " instructions."
+        )
+        raise
+
+    # check if we have bf16 weights
+    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
+    if any(is_type_bf16):
+        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
+
+        # and bf16 is not fully supported in PT yet.
+        logger.warning(
+            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
+            "before loading those in PyTorch model."
+        )
+        flax_state = jax.tree_util.tree_map(
+            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
+        )
+
+    pt_model.base_model_prefix = ""
+
+    flax_state_dict = flatten_dict(flax_state, sep=".")
+    pt_model_dict = pt_model.state_dict()
+
+    # keep track of unexpected & missing keys
+    unexpected_keys = []
+    missing_keys = set(pt_model_dict.keys())
+
+    for flax_key_tuple, flax_tensor in flax_state_dict.items():
+        flax_key_tuple_array = flax_key_tuple.split(".")
+
+        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
+            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
+            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
+        elif flax_key_tuple_array[-1] == "kernel":
+            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
+            flax_tensor = flax_tensor.T
+        elif flax_key_tuple_array[-1] == "scale":
+            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
+
+        if "time_embedding" not in flax_key_tuple_array:
+            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
+                flax_key_tuple_array[i] = (
+                    flax_key_tuple_string.replace("_0", ".0")
+                    .replace("_1", ".1")
+                    .replace("_2", ".2")
+                    .replace("_3", ".3")
+                    .replace("_4", ".4")
+                    .replace("_5", ".5")
+                    .replace("_6", ".6")
+                    .replace("_7", ".7")
+                    .replace("_8", ".8")
+                    .replace("_9", ".9")
+                )
+
+        flax_key = ".".join(flax_key_tuple_array)
+
+        if flax_key in pt_model_dict:
+            if flax_tensor.shape != pt_model_dict[flax_key].shape:
+                raise ValueError(
+                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
+                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
+                )
+            else:
+                # add weight to pytorch dict
+                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
+                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
+                # remove from missing keys
+                missing_keys.remove(flax_key)
+        else:
+            # weight is not expected by PyTorch model
+            unexpected_keys.append(flax_key)
+
+    pt_model.load_state_dict(pt_model_dict)
+
+    # re-transform missing_keys to list
+    missing_keys = list(missing_keys)
+
+    if len(unexpected_keys) > 0:
+        logger.warning(
+            "Some weights of the Flax model were not used when initializing the PyTorch model"
+            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
+            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
+            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
+            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
+            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
+            " FlaxBertForSequenceClassification model)."
+        )
+    if len(missing_keys) > 0:
+        logger.warning(
+            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
+            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
+            " use it for predictions and inference."
+        )
+
+    return pt_model
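For context (not part of the diff): a minimal sketch of the weight-layout convention that `load_flax_weights_in_pytorch_model` applies above, using only `jax.numpy`; the shapes and names are illustrative only.

```python
# Flax stores conv kernels as (H, W, in, out); PyTorch expects (out, in, H, W).
# Dense "kernel" matrices are transposed, and "scale" params are renamed to "weight".
import jax.numpy as jnp

flax_conv_kernel = jnp.zeros((3, 3, 4, 320))            # HWIO, e.g. conv_in/kernel
pt_conv_weight = jnp.transpose(flax_conv_kernel, (3, 2, 0, 1))
assert pt_conv_weight.shape == (320, 4, 3, 3)           # OIHW, matches conv_in.weight

flax_dense_kernel = jnp.zeros((768, 320))               # (in, out) for flax.linen.Dense
pt_linear_weight = flax_dense_kernel.T                  # (out, in) for torch.nn.Linear
assert pt_linear_weight.shape == (320, 768)
```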