Zhiminli committed on
Commit
1c074f9
1 Parent(s): 107faf9

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +11 -8
README.md CHANGED
@@ -38,11 +38,15 @@ model='DiT-g/2' # model type
38
  task_flag="lora_jade_ema_rank64" # task flag
39
  resume=./ckpts/t2i/model/ # resume checkpoint
40
  index_file=dataset/index_v2_json/jade.json # index file
 
41
  batch_size=1 # training batch size
 
42
  grad_accu_steps=2 # gradient accumulation steps
 
 
 
 
43
  rank=64 # rank of lora
44
- max_training_steps=2000 # max training steps
45
- lr=0.0001 # learning rate
46
 
47
  PYTHONPATH=./ deepspeed hydit/train_large_deepspeed.py \
48
  --task-flag ${task_flag} \
@@ -58,20 +62,19 @@ PYTHONPATH=./ deepspeed hydit/train_large_deepspeed.py \
58
  --uncond-p 0.44 \
59
  --uncond-p-t5 0.44 \
60
  --index-file ${index_file} \
61
- --random-crop \
62
  --random-flip \
63
  --batch-size ${batch_size} \
64
- --image-size 1024 \
65
  --global-seed 999 \
66
  --grad-accu-steps ${grad_accu_steps} \
67
- --warmup-num-steps 0 \
68
  --use-flash-attn \
69
  --use-fp16 \
70
  --ema-dtype fp32 \
71
- --results-dir ./log_EXP \
72
- --ckpt-every 100 \
73
  --max-training-steps ${max_training_steps} \
74
- --ckpt-latest-every 2000 \
75
  --log-every 10 \
76
  --deepspeed \
77
  --deepspeed-optimizer \
 
38
  task_flag="lora_jade_ema_rank64" # task flag
39
  resume=./ckpts/t2i/model/ # resume checkpoint
40
  index_file=dataset/index_v2_json/jade.json # index file
41
+ results_dir=./log_EXP # save root for results
42
  batch_size=1 # training batch size
43
+ image_size=1024 # training image resolution
44
  grad_accu_steps=2 # gradient accumulation steps
45
+ warmup_num_steps=0 # warm-up steps
46
+ lr=0.0001 # learning rate
47
+ ckpt_every=100 # create a ckpt every few steps.
48
+ ckpt_latest_every=2000 # create a ckpt named `latest.pt` every few steps.
49
  rank=64 # rank of lora
 
 
50
 
51
  PYTHONPATH=./ deepspeed hydit/train_large_deepspeed.py \
52
  --task-flag ${task_flag} \
 
62
  --uncond-p 0.44 \
63
  --uncond-p-t5 0.44 \
64
  --index-file ${index_file} \
 
65
  --random-flip \
66
  --batch-size ${batch_size} \
67
+ --image-size ${image_size} \
68
  --global-seed 999 \
69
  --grad-accu-steps ${grad_accu_steps} \
70
+ --warmup-num-steps ${warmup_num_steps} \
71
  --use-flash-attn \
72
  --use-fp16 \
73
  --ema-dtype fp32 \
74
+ --results-dir ${results_dir} \
75
+ --ckpt-every ${ckpt_every} \
76
  --max-training-steps ${max_training_steps} \
77
+ --ckpt-latest-every ${ckpt_latest_every} \
78
  --log-every 10 \
79
  --deepspeed \
80
  --deepspeed-optimizer \