sovitrath committed
Commit a9b0289
1 Parent(s): 72b389c

Upload train_multi_subject_dreambooth_inpainting_custom.py

train_multi_subject_dreambooth_inpainting_custom.py ADDED
@@ -0,0 +1,670 @@
+ import argparse
+ import copy
+ import itertools
+ import logging
+ import math
+ import os
+ import random
+ from pathlib import Path
+
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from accelerate import Accelerator
+ from accelerate.logging import get_logger
+ from accelerate.utils import ProjectConfiguration, set_seed
+ from datasets import concatenate_datasets, load_dataset
+ from PIL import Image
+ from torch.utils.data import Dataset
+ from torchvision import transforms
+ from tqdm.auto import tqdm
+ from transformers import CLIPTextModel, CLIPTokenizer
+
+ from diffusers import (
+     AutoencoderKL,
+     DDPMScheduler,
+     StableDiffusionInpaintPipeline,
+     UNet2DConditionModel,
+ )
+ from diffusers.optimization import get_scheduler
+ from diffusers.utils import check_min_version, is_wandb_available
+
+
+ if is_wandb_available():
+     import wandb
+
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
+ check_min_version("0.13.0.dev0")
+
+ logger = get_logger(__name__)
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser(description="Simple example of a training script.")
+     parser.add_argument(
+         "--pretrained_model_name_or_path",
+         type=str,
+         default=None,
+         required=True,
+         help="Path to pretrained model or model identifier from huggingface.co/models.",
+     )
+     parser.add_argument("--instance_data_dir", nargs="+", help="Instance data directories")
+     parser.add_argument(
+         "--output_dir",
+         type=str,
+         default="text-inversion-model",
+         help="The output directory where the model predictions and checkpoints will be written.",
+     )
+     parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
+     parser.add_argument(
+         "--resolution",
+         type=int,
+         default=512,
+         help=(
+             "The resolution for input images, all the images in the train/validation dataset will be resized to this"
+             " resolution"
+         ),
+     )
+     parser.add_argument(
+         "--train_text_encoder", default=False, action="store_true", help="Whether to train the text encoder"
+     )
+     parser.add_argument(
+         "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
+     )
+     parser.add_argument(
+         "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
+     )
+     parser.add_argument(
+         "--max_train_steps",
+         type=int,
+         default=None,
+         help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
+     )
+     parser.add_argument(
+         "--gradient_accumulation_steps",
+         type=int,
+         default=1,
+         help="Number of update steps to accumulate before performing a backward/update pass.",
+     )
+     parser.add_argument(
+         "--learning_rate",
+         type=float,
+         default=5e-6,
+         help="Initial learning rate (after the potential warmup period) to use.",
+     )
+     parser.add_argument(
+         "--scale_lr",
+         action="store_true",
+         default=False,
+         help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
+     )
+     parser.add_argument(
+         "--lr_scheduler",
+         type=str,
+         default="constant",
+         help=(
+             'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
+             ' "constant", "constant_with_warmup"]'
+         ),
+     )
+     parser.add_argument(
+         "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
+     )
+     parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
+     parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
+     parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
+     parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
+     parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
+     parser.add_argument(
+         "--logging_dir",
+         type=str,
+         default="logs",
+         help=(
+             "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
+             " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
+         ),
+     )
+     parser.add_argument(
+         "--mixed_precision",
+         type=str,
+         default="no",
+         choices=["no", "fp16", "bf16"],
+         help=(
+             "Whether to use mixed precision. Choose"
+             " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
+             " and an Nvidia Ampere GPU."
+         ),
+     )
+     parser.add_argument(
+         "--checkpointing_steps",
+         type=int,
+         default=1000,
+         help=(
+             "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
+             " checkpoints in case they are better than the last checkpoint and are suitable for resuming training"
+             " using `--resume_from_checkpoint`."
+         ),
+     )
+     parser.add_argument(
+         "--checkpointing_from",
+         type=int,
+         default=1000,
+         help="The step from which to start saving checkpoints.",
+     )
+     parser.add_argument(
+         "--validation_steps",
+         type=int,
+         default=50,
+         help=(
+             "Run validation every X steps. Validation consists of running the prompts from the test split"
+             " of the instance datasets through the inpainting pipeline"
+             " and logging the resulting images."
+         ),
+     )
+     parser.add_argument(
+         "--validation_from",
+         type=int,
+         default=0,
+         help="The step from which to start running validation.",
+     )
+     parser.add_argument(
+         "--checkpoints_total_limit",
+         type=int,
+         default=None,
+         help=(
+             "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
+             " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
+             " for more docs"
+         ),
+     )
+     parser.add_argument(
+         "--resume_from_checkpoint",
+         type=str,
+         default=None,
+         help=(
+             "Whether training should be resumed from a previous checkpoint. Use a path saved by"
+             ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
+         ),
+     )
+     parser.add_argument(
+         "--validation_project_name",
+         type=str,
+         default=None,
+         help="The Weights & Biases project name.",
+     )
+     parser.add_argument(
+         "--report_to_wandb", default=False, action="store_true", help="Whether to report to Weights & Biases"
+     )
+
+     args = parser.parse_args()
+
+     return args
+
+
+ def prepare_mask_and_masked_image(image, mask):
+     image = np.array(image.convert("RGB"))
+     image = image[None].transpose(0, 3, 1, 2)
+     image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
+
+     mask = np.array(mask.convert("L"))
+     mask = mask.astype(np.float32) / 255.0
+     mask = mask[None, None]
+     mask[mask < 0.5] = 0
+     mask[mask >= 0.5] = 1
+     mask = torch.from_numpy(mask)
+
+     masked_image = image * (mask < 0.5)
+
+     return mask, masked_image
+
+
+ class DreamBoothDataset(Dataset):
+     def __init__(
+         self,
+         tokenizer,
+         datasets_paths,
+     ):
+         self.tokenizer = tokenizer
+         self.datasets_paths = (datasets_paths,)
+         self.datasets = [load_dataset(dataset_path) for dataset_path in self.datasets_paths[0]]
+         self.train_data = concatenate_datasets([dataset["train"] for dataset in self.datasets])
+         self.test_data = concatenate_datasets([dataset["test"] for dataset in self.datasets])
+
+         self.image_normalize = transforms.Compose(
+             [
+                 transforms.ToTensor(),
+                 transforms.Normalize([0.5], [0.5]),
+             ]
+         )
+
+     def set_image(self, img, switch):
+         if img.mode not in ["RGB", "L"]:
+             img = img.convert("RGB")
+
+         if switch:
+             img = img.transpose(Image.FLIP_LEFT_RIGHT)
+
+         img = img.resize((512, 512), Image.BILINEAR)
+
+         return img
+
+     def __len__(self):
+         return len(self.train_data)
+
+     def __getitem__(self, index):
+         # Setup
+         example = {}
+         img_idx = index % len(self.train_data)
+         switch = random.choice([True, False])
+
+         # Load image
+         image = self.set_image(self.train_data[img_idx]["image"], switch)
+
+         # Normalize image
+         image_norm = self.image_normalize(image)
+
+         # Tokenise prompt
+         tokenized_prompt = self.tokenizer(
+             self.train_data[img_idx]["prompt"],
+             padding="do_not_pad",
+             truncation=True,
+             max_length=self.tokenizer.model_max_length,
+         ).input_ids
+
+         # Load masks for image
+         masks = [
+             self.set_image(self.train_data[img_idx][key], switch) for key in self.train_data[img_idx] if "mask" in key
+         ]
+
+         # Build example
+         example["PIL_image"] = image
+         example["instance_image"] = image_norm
+         example["instance_prompt_id"] = tokenized_prompt
+         example["instance_masks"] = masks
+
+         return example
+
+
+ def weighted_mask(masks):
+     # Convert each mask to a NumPy array and ensure it's binary
+     mask_arrays = [np.array(mask) / 255 for mask in masks]  # Normalizing to 0-1 range
+
+     # Generate random weights and apply them to each mask
+     weights = [random.random() for _ in masks]
+     weights = [weight / sum(weights) for weight in weights]
+     weighted_masks = [mask * weight for mask, weight in zip(mask_arrays, weights)]
+
+     # Sum the weighted masks
+     summed_mask = np.sum(weighted_masks, axis=0)
+
+     # Apply a threshold to create the final mask
+     threshold = 0.5  # This threshold can be adjusted
+     result_mask = summed_mask >= threshold
+
+     # Convert the result back to a PIL image
+     return Image.fromarray(result_mask.astype(np.uint8) * 255)
+
+
+ def collate_fn(examples, tokenizer):
+     input_ids = [example["instance_prompt_id"] for example in examples]
+     pixel_values = [example["instance_image"] for example in examples]
+
+     masks, masked_images = [], []
+
+     for example in examples:
+         # generate a random mask
+         mask = weighted_mask(example["instance_masks"])
+
+         # prepare mask and masked image
+         mask, masked_image = prepare_mask_and_masked_image(example["PIL_image"], mask)
+
+         masks.append(mask)
+         masked_images.append(masked_image)
+
+     pixel_values = torch.stack(pixel_values).to(memory_format=torch.contiguous_format).float()
+     masks = torch.stack(masks)
+     masked_images = torch.stack(masked_images)
+     input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
+
+     batch = {"input_ids": input_ids, "pixel_values": pixel_values, "masks": masks, "masked_images": masked_images}
+
+     return batch
+
+
+ def log_validation(pipeline, text_encoder, unet, val_pairs, accelerator):
+     # update pipeline (note: unet and vae are loaded again in float32)
+     pipeline.text_encoder = accelerator.unwrap_model(text_encoder)
+     pipeline.unet = accelerator.unwrap_model(unet)
+
+     with torch.autocast("cuda"):
+         val_results = [{"data_or_path": pipeline(**pair).images[0], "caption": pair["prompt"]} for pair in val_pairs]
+
+     torch.cuda.empty_cache()
+
+     wandb.log({"validation": [wandb.Image(**val_result) for val_result in val_results]})
+
+
+ def checkpoint(args, global_step, accelerator):
+     save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
+     accelerator.save_state(save_path)
+     logger.info(f"Saved state to {save_path}")
+
+
+ def main():
+     args = parse_args()
+
+     project_config = ProjectConfiguration(
+         total_limit=args.checkpoints_total_limit,
+         project_dir=args.output_dir,
+         logging_dir=Path(args.output_dir, args.logging_dir),
+     )
+
+     accelerator = Accelerator(
+         gradient_accumulation_steps=args.gradient_accumulation_steps,
+         mixed_precision=args.mixed_precision,
+         project_config=project_config,
+         log_with="wandb" if args.report_to_wandb else None,
+     )
+
+     if args.report_to_wandb and not is_wandb_available():
+         raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
+
+     if args.seed is not None:
+         set_seed(args.seed)
+
+     # Make one log on every process with the configuration for debugging.
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         level=logging.INFO,
+     )
+     logger.info(accelerator.state, main_process_only=False)
+
+     # Load the tokenizer & models and create wrapper for stable diffusion
+     tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
+     text_encoder = CLIPTextModel.from_pretrained(
+         args.pretrained_model_name_or_path, subfolder="text_encoder"
+     ).requires_grad_(args.train_text_encoder)
+     vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae").requires_grad_(False)
+     unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
+
+     if args.scale_lr:
+         args.learning_rate = (
+             args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
+         )
+
+     optimizer = torch.optim.AdamW(
+         params=itertools.chain(unet.parameters(), text_encoder.parameters())
+         if args.train_text_encoder
+         else unet.parameters(),
+         lr=args.learning_rate,
+         betas=(args.adam_beta1, args.adam_beta2),
+         weight_decay=args.adam_weight_decay,
+         eps=args.adam_epsilon,
+     )
+
+     noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
+
+     train_dataset = DreamBoothDataset(
+         tokenizer=tokenizer,
+         datasets_paths=args.instance_data_dir,
+     )
+
+     train_dataloader = torch.utils.data.DataLoader(
+         train_dataset,
+         batch_size=args.train_batch_size,
+         shuffle=True,
+         collate_fn=lambda examples: collate_fn(examples, tokenizer),
+     )
+
+     # Scheduler and math around the number of training steps.
+     num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
+
+     lr_scheduler = get_scheduler(
+         args.lr_scheduler,
+         optimizer=optimizer,
+         num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
+         num_training_steps=args.max_train_steps * accelerator.num_processes,
+     )
+
+     if args.train_text_encoder:
+         unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
+             unet, text_encoder, optimizer, train_dataloader, lr_scheduler
+         )
+     else:
+         unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
+             unet, optimizer, train_dataloader, lr_scheduler
+         )
+
+     accelerator.register_for_checkpointing(lr_scheduler)
+
+     if args.mixed_precision == "fp16":
+         weight_dtype = torch.float16
+     elif args.mixed_precision == "bf16":
+         weight_dtype = torch.bfloat16
+     else:
+         weight_dtype = torch.float32
+
+     # Move text_encoder and vae to gpu.
+     # For mixed precision training we cast the text_encoder and vae weights to half-precision,
+     # as these models are only used for inference; keeping weights in full precision is not required.
+     vae.to(accelerator.device, dtype=weight_dtype)
+     if not args.train_text_encoder:
+         text_encoder.to(accelerator.device, dtype=weight_dtype)
+
+     # We need to recalculate our total training steps as the size of the training dataloader may have changed.
+     num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
+
+     # Afterwards we calculate our number of training epochs
+     num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
+
+     # We need to initialize the trackers we use, and also store our configuration.
+     # The trackers initialize automatically on the main process.
+     if accelerator.is_main_process:
+         tracker_config = vars(copy.deepcopy(args))
+         accelerator.init_trackers(args.validation_project_name, config=tracker_config)
+
+     # create validation pipeline (note: unet and vae are loaded again in float32)
+     val_pipeline = StableDiffusionInpaintPipeline.from_pretrained(
+         args.pretrained_model_name_or_path,
+         tokenizer=tokenizer,
+         text_encoder=text_encoder,
+         unet=unet,
+         vae=vae,
+         torch_dtype=weight_dtype,
+         safety_checker=None,
+     )
+     val_pipeline.set_progress_bar_config(disable=True)
+
+     # prepare validation dataset
+     val_pairs = [
+         {
+             "image": example["image"],
+             "mask_image": mask,
+             "prompt": example["prompt"],
+         }
+         for example in train_dataset.test_data
+         for mask in [example[key] for key in example if "mask" in key]
+     ]
+
+     # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
+     def save_model_hook(models, weights, output_dir):
+         if accelerator.is_main_process:
+             for model in models:
+                 sub_dir = "unet" if isinstance(model, type(accelerator.unwrap_model(unet))) else "text_encoder"
+                 model.save_pretrained(os.path.join(output_dir, sub_dir))
+
+                 # make sure to pop weight so that corresponding model is not saved again
+                 weights.pop()
+
+     accelerator.register_save_state_pre_hook(save_model_hook)
+
+     print()
+
+     # Train!
+     total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
+
+     logger.info("***** Running training *****")
+     logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
+     logger.info(f"  Num Epochs = {num_train_epochs}")
+     logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
+     logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
+     logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
+     logger.info(f"  Total optimization steps = {args.max_train_steps}")
+
+     global_step = 0
+     first_epoch = 0
+
+     if args.resume_from_checkpoint:
+         if args.resume_from_checkpoint != "latest":
+             path = os.path.basename(args.resume_from_checkpoint)
+         else:
+             # Get the most recent checkpoint
+             dirs = os.listdir(args.output_dir)
+             dirs = [d for d in dirs if d.startswith("checkpoint")]
+             dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
+             path = dirs[-1] if len(dirs) > 0 else None
+
+         if path is None:
+             accelerator.print(
+                 f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
+             )
+             args.resume_from_checkpoint = None
+         else:
+             accelerator.print(f"Resuming from checkpoint {path}")
+             accelerator.load_state(os.path.join(args.output_dir, path))
+             global_step = int(path.split("-")[1])
+
+             resume_global_step = global_step * args.gradient_accumulation_steps
+             first_epoch = global_step // num_update_steps_per_epoch
+             resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
+
+     # Only show the progress bar once on each machine.
+     progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
+     progress_bar.set_description("Steps")
+
+     for epoch in range(first_epoch, num_train_epochs):
+         unet.train()
+         for step, batch in enumerate(train_dataloader):
+             # Skip steps until we reach the resumed step
+             if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
+                 if step % args.gradient_accumulation_steps == 0:
+                     progress_bar.update(1)
+                 continue
+
+             with accelerator.accumulate(unet):
+                 # Convert images to latent space
+                 latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
+                 latents = latents * vae.config.scaling_factor
+
+                 # Convert masked images to latent space
+                 masked_latents = vae.encode(
+                     batch["masked_images"].reshape(batch["pixel_values"].shape).to(dtype=weight_dtype)
+                 ).latent_dist.sample()
+                 masked_latents = masked_latents * vae.config.scaling_factor
+
+                 masks = batch["masks"]
+                 # resize the mask to latents shape as we concatenate the mask to the latents
+                 mask = torch.stack(
+                     [
+                         torch.nn.functional.interpolate(mask, size=(args.resolution // 8, args.resolution // 8))
+                         for mask in masks
+                     ]
+                 )
+                 mask = mask.reshape(-1, 1, args.resolution // 8, args.resolution // 8)
+
+                 # Sample noise that we'll add to the latents
+                 noise = torch.randn_like(latents)
+                 bsz = latents.shape[0]
+                 # Sample a random timestep for each image
+                 timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
+                 timesteps = timesteps.long()
+
+                 # Add noise to the latents according to the noise magnitude at each timestep
+                 # (this is the forward diffusion process)
+                 noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
+
+                 # concatenate the noised latents with the mask and the masked latents
+                 latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1)
+
+                 # Get the text embedding for conditioning
+                 encoder_hidden_states = text_encoder(batch["input_ids"])[0]
+
+                 # Predict the noise residual
+                 noise_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample
+
+                 # Get the target for loss depending on the prediction type
+                 if noise_scheduler.config.prediction_type == "epsilon":
+                     target = noise
+                 elif noise_scheduler.config.prediction_type == "v_prediction":
+                     target = noise_scheduler.get_velocity(latents, noise, timesteps)
+                 else:
+                     raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
+
+                 loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean")
+
+                 accelerator.backward(loss)
+                 if accelerator.sync_gradients:
+                     params_to_clip = (
+                         itertools.chain(unet.parameters(), text_encoder.parameters())
+                         if args.train_text_encoder
+                         else unet.parameters()
+                     )
+                     accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
+
+                 optimizer.step()
+                 lr_scheduler.step()
+                 optimizer.zero_grad()
+
+             # Checks if the accelerator has performed an optimization step behind the scenes
+             if accelerator.sync_gradients:
+                 progress_bar.update(1)
+                 global_step += 1
+
+                 if accelerator.is_main_process:
+                     if (
+                         global_step % args.validation_steps == 0
+                         and global_step >= args.validation_from
+                         and args.report_to_wandb
+                     ):
+                         log_validation(
+                             val_pipeline,
+                             text_encoder,
+                             unet,
+                             val_pairs,
+                             accelerator,
+                         )
+
+                     if global_step % args.checkpointing_steps == 0:
+                         checkpoint(
+                             args,
+                             global_step,
+                             accelerator,
+                         )
+
+             # Step logging
+             logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
+             progress_bar.set_postfix(**logs)
+             accelerator.log(logs, step=global_step)
+
+             if global_step >= args.max_train_steps:
+                 break
+
+     accelerator.wait_for_everyone()
+
+     # Create the pipeline using the trained modules and save it.
+     if accelerator.is_main_process:
+         pipeline = StableDiffusionInpaintPipeline.from_pretrained(
+             args.pretrained_model_name_or_path,
+             unet=accelerator.unwrap_model(unet),
+             text_encoder=accelerator.unwrap_model(text_encoder),
+         )
+         pipeline.save_pretrained(args.output_dir)
+
+     # Terminate training
+     accelerator.end_training()
+
+
+ if __name__ == "__main__":
+     main()
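
A minimal sketch of how the pipeline saved by this script could be used for inference afterwards. The directory, file names, and prompt below are placeholders (the directory stands for whatever `--output_dir` was passed); the call mirrors the `image` / `mask_image` / `prompt` pairs the script already uses for validation.

import torch
from PIL import Image

from diffusers import StableDiffusionInpaintPipeline

# Load the pipeline that the training script saved via pipeline.save_pretrained(args.output_dir).
# "path/to/output_dir" is a placeholder for the actual --output_dir value.
pipe = StableDiffusionInpaintPipeline.from_pretrained("path/to/output_dir", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# The inpainting pipeline expects an RGB image, a single-channel mask (white = region to repaint),
# and a prompt, matching the validation pairs built from the dataset's test split above.
image = Image.open("example_image.png").convert("RGB").resize((512, 512))
mask = Image.open("example_mask.png").convert("L").resize((512, 512))

result = pipe(prompt="a photo of the trained subject", image=image, mask_image=mask).images[0]
result.save("inpainted.png")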