alandao committed on
Commit
008a9e2
1 Parent(s): 74dd660

Upload folder using huggingface_hub

Browse files
make_a_scene/00000.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3d625c328a138b60509ec6ef65a418db3cd157fb152b42d651a922676c3fd2c
3
+ size 611409920
make_a_scene/checkpoint_63.0.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14994c5eb72bc0d79eb50b2f5641b5790bff1c12ae9adfac07693a8c38eb8cd4
3
+ size 1176181403
make_a_scene/img_config.yaml ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ mode: pretrain_image
2
+ devices:
3
+ - 0
4
+ - 1
5
+ - 2
6
+ - 3
7
+ - 4
8
+ - 5
9
+ - 6
10
+ #- 7
11
+
12
+ total_steps: 4700000
13
+ accumulate_grad: 5
14
+ resume: True
15
+ #checkpoint: /home/ubuntu/Make-A-Scene/outputs/pretrain_image/2022-06-10/06-36-07/checkpoint.pt
16
+ checkpoint: /home/ubuntu/Make-A-Scene/outputs/pretrain_image/2022-06-13/22-05-22/checkpoint_21.0.pt
17
+ log_period: 50
18
+ save_period: 1000
19
+ batch_size: 3 # 192 for 256 model and 128 for 512 model
20
+
21
+ model:
22
+ _target_: models.VQBASE
23
+ embed_dim: 256
24
+ n_embed: 8192
25
+ init_steps: 3000
26
+ reservoir_size: 12500 # 2e5 / 8
27
+ ddconfig:
28
+ z_channels: 256
29
+ in_channels: 3
30
+ out_channels: 3
31
+ channels: [128, 128, 128, 256, 512, 512] # [1, 1, 2, 4, 4]
32
+ num_res_blocks: 2
33
+ resolution: 512
34
+ attn_resolutions:
35
+ - 32
36
+ dropout: 0.0
37
+
38
+ optimizer:
39
+ vq:
40
+ lr: 5e-6
41
+ betas:
42
+ - 0.5
43
+ - 0.9
44
+ disc:
45
+ lr: 4.5e-6
46
+ betas:
47
+ - 0.5
48
+ - 0.9
49
+
50
+ dataset:
51
+ _target_: Data.dataset_preprocessor_web.S3ProcessedDataset
52
+ resampled: True
53
+ names:
54
+ - cc3m
55
+ - cc12m
56
+ # path: file:D:/PycharmProjects/Make-A-Scene/server/Make-A-Scene/dataset/coco/{00000..00004}.tar
57
+ # path: file:D:/PycharmProjects/Make-A-Scene/server/Make-A-Scene/dataset/coco/great_dataset.tar
58
+
59
+ loss:
60
+ #_target_: losses.VQVAEWithBCELoss
61
+ _target_: losses.loss_img.VQLPIPSWithDiscriminator
62
+ disc_start: 250001
63
+ disc_weight: 0.8
64
+ codebook_weight: 1.0
65
+
66
+ dataloader:
67
+ batch_size: ${batch_size}
68
+ num_workers: 8
69
+ pin_memory: True
70
+
71
+ hydra:
72
+ job:
73
+ chdir: True
74
+ run:
75
+ dir: ./outputs/${mode}/${now:%Y-%m-%d}/${now:%H-%M-%S}