mhassan101 committed on
Commit
6daaef4
1 Parent(s): 9a0e198

Upload config.json

Browse files
Files changed (1) hide show
  1. config.json +128 -0
config.json ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "_name_or_path": "morphablediffusion/config.json",
  "activation_dropout": 0.0,
  "activation_function": "gelu",
  "attention_dropout": 0.0,
  "attention_window": 512,
  "bos_token_id": 0,
  "classifier_dropout": 0.0,
  "d_model": 1024,
  "data": {
    "params": {
      "batch_size": 70,
      "data_dir": "/cluster/scratch/xiychen/data/facescape_color_calibrated",
      "mesh_topology": "flame",
      "num_workers": 1,
      "shuffled_expression": true
    },
    "target": "ldm.data.facescape.FaceScapeDataset"
  },
  "decoder_attention_heads": 16,
  "decoder_ffn_dim": 4096,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 12,
  "decoder_start_token_id": 2,
  "dropout": 0.1,
  "encoder_attention_heads": 16,
  "encoder_ffn_dim": 4096,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 12,
  "eos_token_id": 2,
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "lightning": {
    "callbacks": {},
    "modelcheckpoint": {
      "params": {
        "every_n_train_steps": 2000
      }
    },
    "trainer": {
      "accumulate_grad_batches": 1,
      "benchmark": true,
      "check_val_every_n_epoch": null,
      "max_steps": 6000,
      "num_sanity_val_steps": 0,
      "precision": 32,
      "val_check_interval": 250
    }
  },
  "max_decoder_position_embeddings": 1024,
  "max_encoder_position_embeddings": 16384,
  "model": {
    "base_learning_rate": "5e-5",
    "params": {
      "batch_view_num": 4,
      "cfg_scale": 2.0,
      "clip_image_encoder_path": "./ckpt/ViT-L-14.pt",
      "drop_conditions": false,
      "finetune_unet": true,
      "image_size": 256,
      "output_num": 8,
      "projection": "perspective",
      "scheduler_config": {
        "params": {
          "cycle_lengths": [
            100000
          ],
          "f_max": [
            1.0
          ],
          "f_min": [
            1.0
          ],
          "f_start": [
            0.02
          ],
          "warm_up_steps": [
            100
          ]
        },
        "target": "ldm.lr_scheduler.LambdaLinearScheduler"
      },
      "target_elevation": 0,
      "unet_config": {
        "params": {
          "attention_resolutions": [
            4,
            2,
            1
          ],
          "channel_mult": [
            1,
            2,
            4,
            4
          ],
          "context_dim": 768,
          "image_size": 32,
          "in_channels": 8,
          "legacy": false,
          "model_channels": 320,
          "num_heads": 8,
          "num_res_blocks": 2,
          "out_channels": 4,
          "transformer_depth": 1,
          "use_checkpoint": true,
          "use_spatial_transformer": true,
          "volume_dims": [
            64,
            128,
            256,
            512
          ]
        },
        "target": "ldm.models.diffusion.attention.DepthWiseAttention"
      },
      "use_spatial_volume": false,
      "view_num": 16
    },
    "target": "ldm.models.diffusion.morphable_diffusion.SyncMultiviewDiffusion"
  },
  "model_type": "led",
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "transformers_version": "4.42.4",
  "use_cache": true,
  "vocab_size": 50265
}