# --------------------------------------------------------
# BEiT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------
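#
# Example launch (illustrative sketch only; the script name, paths and GPU count
# below are placeholders, not values taken from this repository):
#   python -m torch.distributed.launch --nproc_per_node=8 run_beit_pretraining.py \
#       --model beit_base_patch16_224_8k_vocab \
#       --data_path /path/to/imagenet \
#       --output_dir ./output \
#       --discrete_vae_weight_path /path/to/tokenizer_weights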

import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os

from pathlib import Path

from timm.models import create_model
from optim_factory import create_optimizer
from datasets import build_beit_pretraining_dataset
from engine_for_pretraining import train_one_epoch
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
import modeling_pretrain


def get_args():
    parser = argparse.ArgumentParser('BEiT pre-training script', add_help=False)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--epochs', default=300, type=int)
    parser.add_argument('--save_ckpt_freq', default=20, type=int)
    parser.add_argument("--discrete_vae_weight_path", type=str)
    parser.add_argument("--discrete_vae_type", type=str, default="dall-e")

    # Model parameters
    parser.add_argument('--model', default='beit_base_patch16_224_8k_vocab', type=str, metavar='MODEL',
                        help='Name of model to train')

    parser.add_argument('--rel_pos_bias', action='store_true')
    parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
    parser.set_defaults(rel_pos_bias=True)
    parser.add_argument('--abs_pos_emb', action='store_true')
    parser.set_defaults(abs_pos_emb=False)

    parser.add_argument('--layer_scale_init_value', default=0.1, type=float,
                        help="0.1 for base, 1e-5 for large. set 0 to disable layer scale")

    parser.add_argument('--num_mask_patches', default=75, type=int,
                        help='number of visual tokens/patches to be masked')
    parser.add_argument('--max_mask_patches_per_block', type=int, default=None)
    parser.add_argument('--min_mask_patches_per_block', type=int, default=16)

    parser.add_argument('--input_size', default=224, type=int,
                        help='images input size for backbone')
    parser.add_argument('--second_input_size', default=112, type=int,
                        help='images input size for discrete vae')

    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')

    # Optimizer parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
    parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt_betas', default=[0.9, 0.999], type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: 0.9, 0.999, use opt default)')
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
        weight decay. We use a cosine schedule for WD.
        (Set it to the same value as args.weight_decay to keep the weight decay constant.)""")

    parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                        help='learning rate (default: 5e-4)')
    parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min_lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')

    parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
                        help='number of steps to warmup LR (overrides warmup_epochs when set > 0)')

    # Augmentation parameters
    parser.add_argument('--train_interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--second_interpolation', type=str, default='lanczos',
                        help='Interpolation for discrete vae (random, bilinear, bicubic default: "lanczos")')

    # Dataset parameters
    parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true')

    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default=None,
                        help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)

    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--auto_resume', action='store_true')
    parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
    parser.set_defaults(auto_resume=True)

    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem',
                        help='')
    parser.set_defaults(pin_mem=True)

    # Distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')

    return parser.parse_args()
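

# Note: the BEiT pretraining architectures (e.g. beit_base_patch16_224_8k_vocab)
# are registered with timm's model registry when modeling_pretrain is imported
# above; that registration is what lets create_model() resolve args.model by name.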
def get_model(args):
    print(f"Creating model: {args.model}")
    model = create_model(
        args.model,
        pretrained=False,
        drop_path_rate=args.drop_path,
        drop_block_rate=None,
        use_shared_rel_pos_bias=args.rel_pos_bias,
        use_abs_pos_emb=args.abs_pos_emb,
        init_values=args.layer_scale_init_value,
    )
    return model


def main(args):
    utils.init_distributed_mode(args)

    print(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    # random.seed(seed)

    cudnn.benchmark = True

    model = get_model(args)
    patch_size = model.patch_embed.patch_size
    print("Patch size = %s" % str(patch_size))
    args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
    args.patch_size = patch_size
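    # window_size is the backbone's patch grid (input_size / patch_size per side,
    # e.g. 14x14 for 224px images with 16px patches); the pretraining dataset
    # builder uses it to size the block-wise masking over patches.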

    # get dataset
    dataset_train = build_beit_pretraining_dataset(args)
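
    # The discrete VAE (the DALL-E tokenizer when --discrete_vae_type is "dall-e")
    # maps each image to a grid of visual-token ids; those ids serve as the
    # prediction targets for the masked patches during pre-training.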
    # prepare discrete vae
    d_vae = utils.create_d_vae(
        weight_path=args.discrete_vae_weight_path, d_vae_type=args.discrete_vae_type,
        device=device, image_size=args.second_input_size)

    if True:  # args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        sampler_rank = global_rank
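
        # steps per epoch = total samples // (per-GPU batch size * number of processes)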
        num_training_steps_per_epoch = len(dataset_train) // args.batch_size // num_tasks

        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True
        )
        print("Sampler_train = %s" % str(sampler_train))
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)

    if global_rank == 0 and args.log_dir is not None:
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
    else:
        log_writer = None

    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )

    model.to(device)
    model_without_ddp = model
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Model = %s" % str(model_without_ddp))
    print('number of params:', n_parameters)

    total_batch_size = args.batch_size * utils.get_world_size()
    print("LR = %.8f" % args.lr)
    print("Batch size = %d" % total_batch_size)
    print("Number of training steps = %d" % num_training_steps_per_epoch)
    print("Number of training examples per epoch = %d" % (total_batch_size * num_training_steps_per_epoch))

    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module

    optimizer = create_optimizer(
        args, model_without_ddp)
    loss_scaler = NativeScaler()

    print("Use step level LR & WD scheduler!")
    lr_schedule_values = utils.cosine_scheduler(
        args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
        warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
    )
    if args.weight_decay_end is None:
        args.weight_decay_end = args.weight_decay
    wd_schedule_values = utils.cosine_scheduler(
        args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
    print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))
    utils.auto_load_model(
        args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)

    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            data_loader_train.sampler.set_epoch(epoch)
        if log_writer is not None:
            log_writer.set_step(epoch * num_training_steps_per_epoch)
        train_stats = train_one_epoch(
            model, d_vae, data_loader_train,
            optimizer, device, epoch, loss_scaler,
            args.clip_grad, log_writer=log_writer,
            start_steps=epoch * num_training_steps_per_epoch,
            lr_schedule_values=lr_schedule_values,
            wd_schedule_values=wd_schedule_values,
        )
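        # Save a checkpoint every --save_ckpt_freq epochs and always after the final epoch.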
        if args.output_dir:
            if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
                utils.save_model(
                    args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
                    loss_scaler=loss_scaler, epoch=epoch)

        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch, 'n_parameters': n_parameters}

        if args.output_dir and utils.is_main_process():
            if log_writer is not None:
                log_writer.flush()
            with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
                f.write(json.dumps(log_stats) + "\n")

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))


if __name__ == '__main__':
    opts = get_args()
    if opts.output_dir:
        Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
    main(opts)