Yntec committed on
Commit
93ade78
1 Parent(s): 88ee23f

Create convert_repo_to_safetensors_sd_gr.py

Files changed (1)
  1. convert_repo_to_safetensors_sd_gr.py +401 -0
convert_repo_to_safetensors_sd_gr.py ADDED
@@ -0,0 +1,401 @@
+ # Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
+ # *Only* converts the UNet, VAE, and Text Encoder.
+ # Does not convert optimizer state or any other thing.
+
+ import argparse
+ import os.path as osp
+ import re
+
+ import torch
+ from safetensors.torch import load_file, save_file
+ import gradio as gr
+
+
+ # =================#
+ # UNet Conversion #
+ # =================#
+
+ unet_conversion_map = [
+     # (stable-diffusion, HF Diffusers)
+     ("time_embed.0.weight", "time_embedding.linear_1.weight"),
+     ("time_embed.0.bias", "time_embedding.linear_1.bias"),
+     ("time_embed.2.weight", "time_embedding.linear_2.weight"),
+     ("time_embed.2.bias", "time_embedding.linear_2.bias"),
+     ("input_blocks.0.0.weight", "conv_in.weight"),
+     ("input_blocks.0.0.bias", "conv_in.bias"),
+     ("out.0.weight", "conv_norm_out.weight"),
+     ("out.0.bias", "conv_norm_out.bias"),
+     ("out.2.weight", "conv_out.weight"),
+     ("out.2.bias", "conv_out.bias"),
+ ]
+
+ unet_conversion_map_resnet = [
+     # (stable-diffusion, HF Diffusers)
+     ("in_layers.0", "norm1"),
+     ("in_layers.2", "conv1"),
+     ("out_layers.0", "norm2"),
+     ("out_layers.3", "conv2"),
+     ("emb_layers.1", "time_emb_proj"),
+     ("skip_connection", "conv_shortcut"),
+ ]
+
+ unet_conversion_map_layer = []
+ # hardcoded number of downblocks and resnets/attentions...
+ # would need smarter logic for other networks.
+ for i in range(4):
+     # loop over downblocks/upblocks
+
+     for j in range(2):
+         # loop over resnets/attentions for downblocks
+         hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
+         sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
+         unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
+
+         if i < 3:
+             # no attention layers in down_blocks.3
+             hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
+             sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
+             unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
+
+     for j in range(3):
+         # loop over resnets/attentions for upblocks
+         hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
+         sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
+         unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
+
+         if i > 0:
+             # no attention layers in up_blocks.0
+             hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
+             sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
+             unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
+
+     if i < 3:
+         # no downsample in down_blocks.3
+         hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
+         sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
+         unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
+
+         # no upsample in up_blocks.3
+         hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
+         sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
+         unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
+
+ hf_mid_atn_prefix = "mid_block.attentions.0."
+ sd_mid_atn_prefix = "middle_block.1."
+ unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
+
+ for j in range(2):
+     hf_mid_res_prefix = f"mid_block.resnets.{j}."
+     sd_mid_res_prefix = f"middle_block.{2*j}."
+     unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
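+
+ # With i = j = 0, the loops above emit (stable-diffusion, HF Diffusers) prefix
+ # pairs such as ("input_blocks.1.0.", "down_blocks.0.resnets.0.") and
+ # ("middle_block.0.", "mid_block.resnets.0.").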
+
+
+ def convert_unet_state_dict(unet_state_dict):
+     # buyer beware: this is a *brittle* function,
+     # and correct output requires that all of these pieces interact in
+     # the exact order in which I have arranged them.
+     mapping = {k: k for k in unet_state_dict.keys()}
+     for sd_name, hf_name in unet_conversion_map:
+         mapping[hf_name] = sd_name
+     for k, v in mapping.items():
+         if "resnets" in k:
+             for sd_part, hf_part in unet_conversion_map_resnet:
+                 v = v.replace(hf_part, sd_part)
+             mapping[k] = v
+     for k, v in mapping.items():
+         for sd_part, hf_part in unet_conversion_map_layer:
+             v = v.replace(hf_part, sd_part)
+         mapping[k] = v
+     new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
+     return new_state_dict
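+
+ # Example end-to-end rename performed by convert_unet_state_dict:
+ #   "down_blocks.0.resnets.0.norm1.weight" -> "input_blocks.1.0.in_layers.0.weight"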
+
+
+ # ================#
+ # VAE Conversion #
+ # ================#
+
+ vae_conversion_map = [
+     # (stable-diffusion, HF Diffusers)
+     ("nin_shortcut", "conv_shortcut"),
+     ("norm_out", "conv_norm_out"),
+     ("mid.attn_1.", "mid_block.attentions.0."),
+ ]
+
+ for i in range(4):
+     # down_blocks have two resnets
+     for j in range(2):
+         hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
+         sd_down_prefix = f"encoder.down.{i}.block.{j}."
+         vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
+
+     if i < 3:
+         hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
+         sd_downsample_prefix = f"down.{i}.downsample."
+         vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
+
+         hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
+         sd_upsample_prefix = f"up.{3-i}.upsample."
+         vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
+
+     # up_blocks have three resnets
+     # also, up blocks in hf are numbered in reverse from sd
+     for j in range(3):
+         hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
+         sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
+         vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
+
+ # this part accounts for mid blocks in both the encoder and the decoder
+ for i in range(2):
+     hf_mid_res_prefix = f"mid_block.resnets.{i}."
+     sd_mid_res_prefix = f"mid.block_{i+1}."
+     vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
+
+
+ vae_conversion_map_attn = [
+     # (stable-diffusion, HF Diffusers)
+     ("norm.", "group_norm."),
+     ("q.", "query."),
+     ("k.", "key."),
+     ("v.", "value."),
+     ("proj_out.", "proj_attn."),
+ ]
+
+ # This is probably not the most ideal solution, but it does work.
+ vae_extra_conversion_map = [
+     ("to_q", "q"),
+     ("to_k", "k"),
+     ("to_v", "v"),
+     ("to_out.0", "proj_out"),
+ ]
+
+
+ def reshape_weight_for_sd(w):
+     # convert HF linear weights to SD conv2d weights
+     if w.ndim != 1:
+         return w.reshape(*w.shape, 1, 1)
+     else:
+         return w
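+
+ # e.g. a (512, 512) linear weight becomes a (512, 512, 1, 1) 1x1-conv weight,
+ # while 1-D tensors (biases, norm weights) pass through unchanged.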
+
+
+ def convert_vae_state_dict(vae_state_dict):
+     mapping = {k: k for k in vae_state_dict.keys()}
+     for k, v in mapping.items():
+         for sd_part, hf_part in vae_conversion_map:
+             v = v.replace(hf_part, sd_part)
+         mapping[k] = v
+     for k, v in mapping.items():
+         if "attentions" in k:
+             for sd_part, hf_part in vae_conversion_map_attn:
+                 v = v.replace(hf_part, sd_part)
+             mapping[k] = v
+     new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
+     weights_to_convert = ["q", "k", "v", "proj_out"]
+     keys_to_rename = {}
+     for k, v in new_state_dict.items():
+         for weight_name in weights_to_convert:
+             if f"mid.attn_1.{weight_name}.weight" in k:
+                 print(f"Reshaping {k} for SD format")
+                 new_state_dict[k] = reshape_weight_for_sd(v)
+         for weight_name, real_weight_name in vae_extra_conversion_map:
+             if f"mid.attn_1.{weight_name}.weight" in k or f"mid.attn_1.{weight_name}.bias" in k:
+                 keys_to_rename[k] = k.replace(weight_name, real_weight_name)
+     for k, v in keys_to_rename.items():
+         if k in new_state_dict:
+             print(f"Renaming {k} to {v}")
+             new_state_dict[v] = reshape_weight_for_sd(new_state_dict[k])
+             del new_state_dict[k]
+     return new_state_dict
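+
+ # Example renames performed by convert_vae_state_dict:
+ #   "encoder.down_blocks.0.resnets.0.conv1.weight" -> "encoder.down.0.block.0.conv1.weight"
+ #   "decoder.mid_block.attentions.0.group_norm.weight" -> "decoder.mid.attn_1.norm.weight"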
+
+
+ # =========================#
+ # Text Encoder Conversion #
+ # =========================#
+
+
+ textenc_conversion_lst = [
+     # (stable-diffusion, HF Diffusers)
+     ("resblocks.", "text_model.encoder.layers."),
+     ("ln_1", "layer_norm1"),
+     ("ln_2", "layer_norm2"),
+     (".c_fc.", ".fc1."),
+     (".c_proj.", ".fc2."),
+     (".attn", ".self_attn"),
+     ("ln_final.", "transformer.text_model.final_layer_norm."),
+     ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
+     ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
+ ]
+ protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
+ textenc_pattern = re.compile("|".join(protected.keys()))
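+
+ # e.g. textenc_pattern rewrites "text_model.encoder.layers.0.layer_norm1.weight"
+ # to "resblocks.0.ln_1.weight".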
+
+ # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
+ code2idx = {"q": 0, "k": 1, "v": 2}
+
+
+ def convert_text_enc_state_dict_v20(text_enc_dict):
+     new_state_dict = {}
+     capture_qkv_weight = {}
+     capture_qkv_bias = {}
+     for k, v in text_enc_dict.items():
+         if (
+             k.endswith(".self_attn.q_proj.weight")
+             or k.endswith(".self_attn.k_proj.weight")
+             or k.endswith(".self_attn.v_proj.weight")
+         ):
+             k_pre = k[: -len(".q_proj.weight")]
+             # the character at this offset is "q", "k", or "v"
+             k_code = k[-len("q_proj.weight")]
+             if k_pre not in capture_qkv_weight:
+                 capture_qkv_weight[k_pre] = [None, None, None]
+             capture_qkv_weight[k_pre][code2idx[k_code]] = v
+             continue
+
+         if (
+             k.endswith(".self_attn.q_proj.bias")
+             or k.endswith(".self_attn.k_proj.bias")
+             or k.endswith(".self_attn.v_proj.bias")
+         ):
+             k_pre = k[: -len(".q_proj.bias")]
+             k_code = k[-len("q_proj.bias")]
+             if k_pre not in capture_qkv_bias:
+                 capture_qkv_bias[k_pre] = [None, None, None]
+             capture_qkv_bias[k_pre][code2idx[k_code]] = v
+             continue
+
+         relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
+         new_state_dict[relabelled_key] = v
+
+     for k_pre, tensors in capture_qkv_weight.items():
+         if None in tensors:
+             raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
+         relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
+         new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
+
+     for k_pre, tensors in capture_qkv_bias.items():
+         if None in tensors:
+             raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
+         relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
+         new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
+
+     return new_state_dict
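+
+ # For SD 2.x the per-layer q/k/v projections are concatenated into single
+ # in_proj tensors, e.g. three (1024, 1024) weights become one (3072, 1024) weight.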
+
+
+ def convert_text_enc_state_dict(text_enc_dict):
+     return text_enc_dict
+
+
+ def convert_diffusers_to_safetensors(model_path, checkpoint_path, half=True, progress=gr.Progress(track_tqdm=True)):
+     progress(0, desc="Start converting...")
+     # Paths of the safetensors component files
+     unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.safetensors")
+     vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.safetensors")
+     text_enc_path = osp.join(model_path, "text_encoder", "model.safetensors")
+
+     # Load each component from safetensors if present, otherwise fall back to the PyTorch .bin file
+     if osp.exists(unet_path):
+         unet_state_dict = load_file(unet_path, device="cpu")
+     else:
+         unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin")
+         unet_state_dict = torch.load(unet_path, map_location="cpu")
+
+     if osp.exists(vae_path):
+         vae_state_dict = load_file(vae_path, device="cpu")
+     else:
+         vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin")
+         vae_state_dict = torch.load(vae_path, map_location="cpu")
+
+     if osp.exists(text_enc_path):
+         text_enc_dict = load_file(text_enc_path, device="cpu")
+     else:
+         text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin")
+         text_enc_dict = torch.load(text_enc_path, map_location="cpu")
+
+     # Convert the UNet model
+     unet_state_dict = convert_unet_state_dict(unet_state_dict)
+     unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
+
+     # Convert the VAE model
+     vae_state_dict = convert_vae_state_dict(vae_state_dict)
+     vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
+
+     # Easiest way to identify a v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
+     is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
+
+     if is_v20_model:
+         # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
+         text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
+         text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
+         text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
+     else:
+         text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
+         text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
+
+     # Put together the new checkpoint
+     state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
+     if half:
+         state_dict = {k: v.half() for k, v in state_dict.items()}
+
+     save_file(state_dict, checkpoint_path)
+
+     progress(1, desc="Converted.")
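+
+ # Example (hypothetical local path): convert a Diffusers pipeline directory into
+ # a single checkpoint file:
+ #   convert_diffusers_to_safetensors("./my_pipeline", "my_model.safetensors", half=True)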
+
+
+ def download_repo(repo_id, dir_path, progress=gr.Progress(track_tqdm=True)):
+     from huggingface_hub import snapshot_download
+     try:
+         snapshot_download(repo_id=repo_id, local_dir=dir_path)
+     except Exception as e:
+         print(f"Error: Failed to download {repo_id}. {e}")
+         return
+
+
+ def upload_safetensors_to_repo(filename, progress=gr.Progress(track_tqdm=True)):
+     from huggingface_hub import HfApi, hf_hub_url
+     import os
+     from pathlib import Path
+     output_filename = Path(filename).name
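+     # NOTE: the write token and destination repo come from the HF_TOKEN and
+     # HF_OUTPUT_REPO environment variables.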
+     hf_token = os.environ.get("HF_TOKEN")
+     repo_id = os.environ.get("HF_OUTPUT_REPO")
+     api = HfApi()
+     try:
+         progress(0, desc="Start uploading...")
+         api.upload_file(path_or_fileobj=filename, path_in_repo=output_filename, repo_id=repo_id, token=hf_token)
+         progress(1, desc="Uploaded.")
+         url = hf_hub_url(repo_id=repo_id, filename=output_filename)
+     except Exception as e:
+         print(f"Error: Failed to upload to {repo_id}. {e}")
+         return None
+     return url
+
+
+ def convert_repo_to_safetensors(repo_id, half=True, progress=gr.Progress(track_tqdm=True)):
+     download_dir = f"{repo_id.split('/')[0]}_{repo_id.split('/')[-1]}"
+     output_filename = f"{repo_id.split('/')[0]}_{repo_id.split('/')[-1]}.safetensors"
+     download_repo(repo_id, download_dir)
+     convert_diffusers_to_safetensors(download_dir, output_filename, half)
+     return output_filename
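+
+ # e.g. convert_repo_to_safetensors("author/model") downloads the repo into
+ # "author_model" and writes "author_model.safetensors" ("author/model" is a placeholder).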
+
+
+ def convert_repo_to_safetensors_multi_sd(repo_id, files, is_upload, urls, half=True, progress=gr.Progress(track_tqdm=True)):
+     file = convert_repo_to_safetensors(repo_id, half)
+     if not urls: urls = []
+     url = ""
+     if is_upload:
+         url = upload_safetensors_to_repo(file)
+         if url: urls.append(url)
+     md = ""
+     for u in urls:
+         md += f"[Download {str(u).split('/')[-1]}]({str(u)})<br>"
+     if not files: files = []
+     files.append(file)
+     return gr.update(value=files), gr.update(value=urls, choices=urls), gr.update(value=md)
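+
+ # The three gr.update() values are intended for a gr.File component (output files),
+ # a choices component such as gr.Radio (uploaded URLs), and a gr.Markdown component
+ # (download links) in the surrounding Gradio app (assumed wiring; the UI is not in this file).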
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument("--repo_id", default=None, type=str, required=True, help="HF Repo ID of the model to convert.")
+     # BooleanOptionalAction (Python 3.9+) gives --half / --no-half; a plain
+     # default=True argument would treat any supplied value, even "False", as truthy.
+     parser.add_argument("--half", default=True, action=argparse.BooleanOptionalAction, help="Save weights in half precision.")
+
+     args = parser.parse_args()
+     assert args.repo_id is not None, "Must provide a Repo ID!"
+
+     convert_repo_to_safetensors(args.repo_id, args.half)
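+
+ # Example CLI usage ("author/model" is a placeholder repo id):
+ #   python convert_repo_to_safetensors_sd_gr.py --repo_id author/model
+ # This writes author_model.safetensors to the working directory; pass --no-half
+ # to keep full-precision weights.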