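# Training config: PixArt-XL/2 backbone + SD VAE + T5-XXL text encoder,
# trained on single 256x256 frames with the "rflow" (rectified-flow) scheduler.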
# Define dataset
# dataset = dict(
#     type="VariableVideoTextDataset",
#     data_path=None,
#     num_frames=None,
#     frame_interval=3,
#     image_size=(None, None),
#     transform_name="resize_crop",
# )
dataset = dict(
    type="VideoTextDataset",
    data_path=None,  # path to the annotation file (left unset here)
    num_frames=1,  # single frame per sample, i.e. image training
    frame_interval=1,
    image_size=(256, 256),
    transform_name="center",
)
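# Each bucket entry is assumed to map a resolution name to
# {num_frames: (keep probability, batch size)}; e.g. "256": {1: (1.0, 256)}
# keeps every single-frame 256px sample and batches 256 of them per step.
# Buckets only apply to the bucket-aware VariableVideoTextDataset above
# (currently commented out); the active VideoTextDataset ignores them.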
bucket_config = {  # 6s/it
    "256": {1: (1.0, 256)},
    "512": {1: (1.0, 80)},
    "480p": {1: (1.0, 52)},
    "1024": {1: (1.0, 20)},
    "1080p": {1: (1.0, 8)},
}

# Define acceleration
num_workers = 16  # dataloader workers per process
dtype = "bf16"  # mixed-precision training dtype
grad_checkpoint = True  # activation checkpointing to save memory
plugin = "zero2"  # ZeRO stage-2 optimizer/gradient sharding
sp_size = 1  # sequence-parallel group size (1 = disabled)

# Define model
# model = dict(
#     type="DiT-XL/2",
#     from_pretrained="/home/zhaowangbo/wangbo/PixArt-alpha/pretrained_models/PixArt-XL-2-512x512.pth",
#     # input_sq_size=512,  # pretrained model is trained on 512x512
#     enable_flash_attn=True,
#     enable_layernorm_kernel=True,
# )
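# Active model: PixArt-XL/2 image backbone. With num_frames=1 in the dataset
# and no_temporal_pos_emb=True, this effectively trains on images only.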
model = dict(
    type="PixArt-XL/2",
    space_scale=1.0,
    time_scale=1.0,
    no_temporal_pos_emb=True,
    from_pretrained="PixArt-XL-2-512x512.pth",
    enable_flash_attn=True,
    enable_layernorm_kernel=True,
)
# model = dict(
#     type="DiT-XL/2",
#     # space_scale=1.0,
#     # time_scale=1.0,
#     no_temporal_pos_emb=True,
#     # from_pretrained="PixArt-XL-2-512x512.pth",
#     from_pretrained="/home/zhaowangbo/wangbo/PixArt-alpha/pretrained_models/PixArt-XL-2-512x512.pth",
#     enable_flash_attn=True,
#     enable_layernorm_kernel=True,
# )
vae = dict(
    type="VideoAutoencoderKL",
    from_pretrained="stabilityai/sd-vae-ft-ema",
    micro_batch_size=4,  # encode/decode frames in chunks of 4 to bound VAE memory
)
text_encoder = dict(
    type="t5",
    from_pretrained="DeepFloyd/t5-v1_1-xxl",
    model_max_length=200,  # max text tokens per prompt
    shardformer=True,  # shard/accelerate the T5 encoder
)
scheduler = dict(
    type="rflow",  # rectified flow
    # timestep_respacing="",
)

# Others
seed = 42
outputs = "outputs"
wandb = False

epochs = 10
log_every = 10  # log every N steps
ckpt_every = 500  # save a checkpoint every N steps
load = None  # path to a checkpoint to resume training from

batch_size = 100  # only for logging
lr = 2e-5
grad_clip = 1.0  # max gradient norm