k4d3 committed on
Commit
68fb7d8
1 Parent(s): 22ae128

oh yeah these

Browse files

Signed-off-by: Balazs Horvath <acsipont@gmail.com>

tf-xyfriend1-bleh ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env zsh
2
+ #
3
+ # >>> conda initialize >>>
4
+ # !! Contents within this block are managed by 'conda init' !!
5
+ __conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
6
+ if [ $? -eq 0 ]; then
7
+ eval "$__conda_setup"
8
+ else
9
+ if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
10
+ . "/home/kade/miniconda3/etc/profile.d/conda.sh"
11
+ else
12
+ export PATH="/home/kade/miniconda3/bin:$PATH"
13
+ fi
14
+ fi
15
+ unset __conda_setup
16
+ # <<< conda initialize <
17
+
18
+ conda activate sdscripts
19
+
20
+ NAME="friend-v1s2000"
21
+ TRAINING_DIR="/home/kade/datasets/friend"
22
+ OUTPUT_DIR="/home/kade/flux_output_dir"
23
+
24
+ # Extract the number of steps from the NAME
25
+ STEPS=$(echo $NAME | grep -oE '[0-9]+$')
26
+
27
+ # If no number is found at the end of NAME, set a default value
28
+ if [ -z "$STEPS" ]; then
29
+ STEPS=4096
30
+ echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
31
+ else
32
+ echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
33
+ fi
34
+
35
+ args=(
36
+ ## Model Paths
37
+ --pretrained_model_name_or_path ~/ComfyUI/models/unet/flux1-dev.safetensors
38
+ --clip_l ~/ComfyUI/models/clip/clip_l.safetensors
39
+ --t5xxl ~/ComfyUI/models/clip/t5xxl_fp16.safetensors
40
+ --ae ~/ComfyUI/models/vae/ae.safetensors
41
+ ## Network Arguments
42
+ # NOTE: Bad idea to train T5!
43
+ #--network_args
44
+ # "train_t5xxl=True"
45
+ ## Timestep Sampling
46
+ --timestep_sampling shift
47
+ # `--discrete_flow_shift` is the discrete flow shift for the Euler Discrete Scheduler,
48
+ # default is 3.0 (same as SD3).
49
+ --discrete_flow_shift 3.1582
50
+ # `--model_prediction_type` is how to interpret and process the model prediction.
51
+ # * `raw`: use as is, same as x-flux
52
+ # * `additive`: add to noisy input
53
+ # * `sigma_scaled`: apply sigma scaling, same as SD3
54
+ --model_prediction_type raw
55
+ --guidance_scale 1.0
56
+ # NOTE: In kohya's experiments,
57
+ # `--timestep_sampling shift --discrete_flow_shift 3.1582 --model_prediction_type raw --guidance_scale 1.0`
58
+ # (with the default `l2` `loss_type`) seems to work better.
59
+ #
60
+ # NOTE: The existing `--loss_type` option may be useful for FLUX.1 training. The default is `l2`.
61
+ #--loss_type l2
62
+ #
63
+ # Latents
64
+ --cache_latents_to_disk
65
+ --save_model_as safetensors
66
+ --sdpa
67
+ --persistent_data_loader_workers
68
+ --max_data_loader_n_workers 2
69
+ --seed 42
70
+ --max_train_steps=$STEPS
71
+ --gradient_checkpointing
72
+ --mixed_precision bf16
73
+ --optimizer_type=ClybW
74
+ --save_precision bf16
75
+ --network_module networks.lora_flux
76
+ --network_dim 4
77
+ --learning_rate 5e-4
78
+ --cache_text_encoder_outputs
79
+ --cache_text_encoder_outputs_to_disk
80
+ --fp8_base
81
+ --highvram
82
+ --dataset_config "$TRAINING_DIR/config.toml"
83
+ --output_dir $OUTPUT_DIR
84
+ --output_name $NAME
85
+ ## Sample Prompts
86
+ --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
87
+ --sample_every_n_steps=20
88
+ --sample_sampler="euler"
89
+ --sample_at_first
90
+ --save_every_n_steps=100
91
+ )
92
+
93
+ cd ~/source/repos/sd-scripts-sd3
94
+ python "./flux_train_network.py" "${args[@]}"
95
+ cd ~
96
+
tf-yyfriend ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# FLUX.1-dev LoRA training launcher (yyfriend dataset) using kohya's
# sd-scripts (sd3 branch). The trailing digits of NAME select the step count.
#
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

NAME="yyfriend-v1s2000"
TRAINING_DIR="/home/kade/datasets/yyfriend"
OUTPUT_DIR="/home/kade/flux_output_dir/$NAME"

# Extract the number of steps from the NAME (trailing digits, e.g. "...s2000").
STEPS=$(grep -oE '[0-9]+$' <<< "$NAME")

# If no number is found at the end of NAME, set a default value
if [ -z "$STEPS" ]; then
    STEPS=4096
    echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
else
    echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
fi

args=(
    ## Model Paths
    --pretrained_model_name_or_path ~/ComfyUI/models/unet/flux1-dev.safetensors
    --clip_l ~/ComfyUI/models/clip/clip_l.safetensors
    --t5xxl ~/ComfyUI/models/clip/t5xxl_fp16.safetensors
    --ae ~/ComfyUI/models/vae/ae.safetensors
    ## Network Arguments
    # NOTE: Bad idea to train T5!
    #--network_args
    #    "train_t5xxl=True"
    ## Timestep Sampling
    --timestep_sampling shift
    # `--discrete_flow_shift` is the discrete flow shift for the Euler Discrete
    # Scheduler, default is 3.0 (same as SD3).
    --discrete_flow_shift 3.1582
    # `--model_prediction_type` is how to interpret and process the model prediction.
    #   * `raw`: use as is, same as x-flux
    #   * `additive`: add to noisy input
    #   * `sigma_scaled`: apply sigma scaling, same as SD3
    --model_prediction_type raw
    --guidance_scale 1.0
    # NOTE: In kohya's experiments,
    # `--timestep_sampling shift --discrete_flow_shift 3.1582 --model_prediction_type raw --guidance_scale 1.0`
    # (with the default `l2` `loss_type`) seems to work better.
    #
    # NOTE: The existing `--loss_type` option may be useful for FLUX.1 training. The default is `l2`.
    #--loss_type l2
    #
    # Latents
    --cache_latents_to_disk
    --save_model_as safetensors
    --sdpa
    --persistent_data_loader_workers
    --max_data_loader_n_workers 2
    --seed 42
    --max_train_steps="$STEPS"
    --gradient_checkpointing
    --mixed_precision bf16
    --optimizer_type=ClybW
    --save_precision bf16
    --network_module networks.lora_flux
    --network_dim 4
    --learning_rate 5e-4
    --cache_text_encoder_outputs
    --cache_text_encoder_outputs_to_disk
    --fp8_base
    --highvram
    --dataset_config "$TRAINING_DIR/config.toml"
    --output_dir "$OUTPUT_DIR"
    --output_name "$NAME"
    ## Sample Prompts
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_every_n_steps=20
    --sample_sampler="euler"
    --sample_at_first
    --save_every_n_steps=100
)

# Abort if the repo checkout is missing rather than launching python from the
# wrong working directory.
cd ~/source/repos/sd-scripts-sd3 || exit 1
python "./flux_train_network.py" "${args[@]}"
cd ~
training_scripts/tf ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# FLUX.1-dev LoRA training launcher (friend dataset, v3) using kohya's
# sd-scripts (sd3 branch). The trailing digits of NAME select the step count.
#
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

NAME="friend-v3s2000"
TRAINING_DIR="/home/kade/datasets/friend"
OUTPUT_DIR="/home/kade/flux_output_dir/$NAME"

# Extract the number of steps from the NAME (trailing digits, e.g. "...s2000").
STEPS=$(grep -oE '[0-9]+$' <<< "$NAME")

# If no number is found at the end of NAME, set a default value
if [ -z "$STEPS" ]; then
    STEPS=4096
    echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
else
    echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
fi

args=(
    ## Model Paths
    --pretrained_model_name_or_path ~/ComfyUI/models/unet/flux1-dev.safetensors
    --clip_l ~/ComfyUI/models/clip/clip_l.safetensors
    --t5xxl ~/ComfyUI/models/clip/t5xxl_fp16.safetensors
    --ae ~/ComfyUI/models/vae/ae.safetensors
    ## Network Arguments
    # NOTE: Bad idea to train T5!
    #--network_args
    #    "train_t5xxl=True"
    ## Timestep Sampling
    --timestep_sampling shift
    # `--discrete_flow_shift` is the discrete flow shift for the Euler Discrete
    # Scheduler, default is 3.0 (same as SD3).
    --discrete_flow_shift 3.1582
    # `--model_prediction_type` is how to interpret and process the model prediction.
    #   * `raw`: use as is, same as x-flux
    #   * `additive`: add to noisy input
    #   * `sigma_scaled`: apply sigma scaling, same as SD3
    --model_prediction_type raw
    --guidance_scale 1.0
    # NOTE: In kohya's experiments,
    # `--timestep_sampling shift --discrete_flow_shift 3.1582 --model_prediction_type raw --guidance_scale 1.0`
    # (with the default `l2` `loss_type`) seems to work better.
    #
    # NOTE: The existing `--loss_type` option may be useful for FLUX.1 training. The default is `l2`.
    #--loss_type l2
    #
    # Latents
    --cache_latents_to_disk
    --save_model_as safetensors
    --sdpa
    --persistent_data_loader_workers
    --max_data_loader_n_workers 2
    --seed 42
    --max_train_steps="$STEPS"
    --gradient_checkpointing
    --mixed_precision bf16
    --optimizer_type=ClybW
    --save_precision bf16
    --network_module networks.lora_flux
    --network_dim 4
    --learning_rate 5e-4
    --cache_text_encoder_outputs
    --cache_text_encoder_outputs_to_disk
    --fp8_base
    --highvram
    --dataset_config "$TRAINING_DIR/config.toml"
    --output_dir "$OUTPUT_DIR"
    --output_name "$NAME"
    ## Sample Prompts
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_every_n_steps=20
    --sample_sampler="euler"
    --sample_at_first
    --save_every_n_steps=100
)

# Abort if the repo checkout is missing rather than launching python from the
# wrong working directory.
cd ~/source/repos/sd-scripts-sd3 || exit 1
python "./flux_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-by_chunie-lokr ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# SDXL (Pony Diffusion V6 XL) LoKr training launcher for the by_chunie
# dataset, using kohya's sd-scripts with a LyCORIS network.
#
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

NAME="by_chunie-v3s1200"
TRAINING_DIR="/home/kade/datasets/by_chunie"
OUTPUT_DIR="/home/kade/output_dir"

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
    # ⚠️ TODO: Benchmark...
    --debiased_estimation_loss
    # ⚠️ TODO: What does this do? Does it even work?
    --max_token_length=225
    # Keep Tokens
    --keep_tokens=1
    --keep_tokens_separator="|||"
    # Model
    --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
    # Output, logging
    --output_dir="$OUTPUT_DIR/$NAME"
    --output_name="$NAME"
    --log_prefix="$NAME-"
    --log_with=tensorboard
    --logging_dir="$OUTPUT_DIR/logs"
    --seed=1728871242
    # Dataset
    --train_data_dir="$TRAINING_DIR"
    --dataset_repeats=1
    --resolution="1024,1024"
    --enable_bucket
    --bucket_reso_steps=64
    --min_bucket_reso=256
    --max_bucket_reso=2048
    --flip_aug
    --shuffle_caption
    --cache_latents
    --cache_latents_to_disk
    --max_data_loader_n_workers=8
    --persistent_data_loader_workers
    # Network config
    # NOTE(review): dim/conv_dim=100000 presumably means "no cap" for LoKr —
    # confirm against the LyCORIS docs.
    --network_dim=100000
    # ⚠️ TODO: Plot
    --network_alpha=64
    --network_module="lycoris.kohya"
    --network_args
        "preset=full"
        "conv_dim=100000"
        "decompose_both=False"
        "conv_alpha=64"
        "rank_dropout=0"
        "module_dropout=0"
        "use_tucker=False"
        "use_scalar=False"
        "rank_dropout_scale=False"
        "algo=lokr"
        "bypass_mode=False"
        "factor=32"
        "use_cp=True"
        "dora_wd=True"
        "train_norm=False"
    --network_dropout=0
    # Optimizer config
    --optimizer_type=FCompass
    --train_batch_size=8
    --gradient_accumulation_steps=6
    --max_grad_norm=1
    --gradient_checkpointing
    --lr_warmup_steps=0
    #--scale_weight_norms=1
    # LR Scheduling
    # NOTE(review): NAME ends in "s1200" but the step count here is hard-coded
    # to 3072 (sibling scripts derive steps from NAME) — confirm this is intended.
    --max_train_steps=3072
    --learning_rate=0.0005
    --unet_lr=0.0002
    --text_encoder_lr=0.0001
    --lr_scheduler="cosine"
    --lr_scheduler_args="num_cycles=0.375"
    # Noise
    --multires_noise_iterations=12
    --multires_noise_discount=0.4
    #--min_snr_gamma=1
    # Optimization, details
    --no_half_vae
    --sdpa
    --mixed_precision="bf16"
    # Saving
    --save_model_as="safetensors"
    --save_precision="fp16"
    --save_every_n_steps=100
    # Saving States
    #--save_state
    # Either resume from a saved state
    #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
    #--skip_until_initial_step
    # Or from a checkpoint
    #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
    #--initial_step=120
    # Sampling
    --sample_every_n_steps=20
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_sampler="euler_a"
    --caption_extension=".txt"
)

# Abort if the repo checkout is missing rather than launching python from the
# wrong working directory.
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-by_darkgem-lokr ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# SDXL (Pony Diffusion V6 XL) LoKr training launcher for the by_darkgem
# dataset (v3), using kohya's sd-scripts with a LyCORIS network.
# The trailing digits of NAME select the training step count.
#
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

NAME="by_darkgem-v3s2400"
TRAINING_DIR="/home/kade/datasets/by_darkgem"
OUTPUT_DIR="/home/kade/output_dir"

# Extract the number of steps from the NAME (trailing digits, e.g. "...s2400").
STEPS=$(grep -oE '[0-9]+$' <<< "$NAME")

# If no number is found at the end of NAME, set a default value
if [ -z "$STEPS" ]; then
    STEPS=4096
    echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
else
    echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
fi

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
    # ⚠️ TODO: Benchmark...
    --debiased_estimation_loss
    # ⚠️ TODO: What does this do? Does it even work?
    --max_token_length=225
    # Keep Tokens
    --keep_tokens=1
    --keep_tokens_separator="|||"
    # Model
    --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
    # Output, logging
    --output_dir="$OUTPUT_DIR/$NAME"
    --output_name="$NAME"
    --log_prefix="$NAME-"
    --log_with=tensorboard
    --logging_dir="$OUTPUT_DIR/logs"
    --seed=1728871242
    # Dataset
    --train_data_dir="$TRAINING_DIR"
    --dataset_repeats=1
    --resolution="1024,1024"
    --enable_bucket
    --bucket_reso_steps=64
    --min_bucket_reso=256
    --max_bucket_reso=2048
    --flip_aug
    --shuffle_caption
    --cache_latents
    --cache_latents_to_disk
    --max_data_loader_n_workers=8
    --persistent_data_loader_workers
    # Network config
    # NOTE(review): dim/conv_dim=100000 presumably means "no cap" for LoKr —
    # confirm against the LyCORIS docs.
    --network_dim=100000
    # ⚠️ TODO: Plot
    --network_alpha=64
    --network_module="lycoris.kohya"
    --network_args
        "preset=full"
        "conv_dim=100000"
        "decompose_both=False"
        "conv_alpha=64"
        "rank_dropout=0"
        "module_dropout=0"
        "use_tucker=False"
        "use_scalar=False"
        "rank_dropout_scale=False"
        "algo=lokr"
        "bypass_mode=False"
        "factor=32"
        "use_cp=True"
        "dora_wd=True"
        "train_norm=False"
    --network_dropout=0
    # Optimizer config
    --optimizer_type=FCompass
    --train_batch_size=8
    --gradient_accumulation_steps=6
    --max_grad_norm=1
    --gradient_checkpointing
    --lr_warmup_steps=0
    #--scale_weight_norms=1
    # LR Scheduling
    --max_train_steps="$STEPS"
    --learning_rate=0.0005
    --unet_lr=0.0002
    --text_encoder_lr=0.0001
    --lr_scheduler="cosine"
    --lr_scheduler_args="num_cycles=0.375"
    # Noise
    --multires_noise_iterations=12
    --multires_noise_discount=0.4
    #--min_snr_gamma=1
    # Optimization, details
    --no_half_vae
    --sdpa
    --mixed_precision="bf16"
    # Saving
    --save_model_as="safetensors"
    --save_precision="fp16"
    --save_every_n_steps=100
    # Saving States
    #--save_state
    # Either resume from a saved state
    #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
    #--skip_until_initial_step
    # Or from a checkpoint
    #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
    #--initial_step=120
    # Sampling
    --sample_every_n_steps=100
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_sampler="euler_a"
    --sample_at_first
    --caption_extension=".txt"
)

# Abort if the repo checkout is missing rather than launching python from the
# wrong working directory.
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-by_darkgem-lokr+factor ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# SDXL (Pony Diffusion V6 XL) LoKr training launcher for the by_darkgem
# dataset (v4; lower LoKr factor=16), using kohya's sd-scripts with a
# LyCORIS network. The trailing digits of NAME select the step count.
#
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

NAME="by_darkgem-v4s2400"
TRAINING_DIR="/home/kade/datasets/by_darkgem"
OUTPUT_DIR="/home/kade/output_dir"

# Extract the number of steps from the NAME (trailing digits, e.g. "...s2400").
STEPS=$(grep -oE '[0-9]+$' <<< "$NAME")

# If no number is found at the end of NAME, set a default value
if [ -z "$STEPS" ]; then
    STEPS=4096
    echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
else
    echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
fi

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
    # ⚠️ TODO: Benchmark...
    --debiased_estimation_loss
    # ⚠️ TODO: What does this do? Does it even work?
    --max_token_length=225
    # Keep Tokens
    --keep_tokens=1
    --keep_tokens_separator="|||"
    # Model
    --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
    # Output, logging
    --output_dir="$OUTPUT_DIR/$NAME"
    --output_name="$NAME"
    --log_prefix="$NAME-"
    --log_with=tensorboard
    --logging_dir="$OUTPUT_DIR/logs"
    --seed=1728871242
    # Dataset
    --train_data_dir="$TRAINING_DIR"
    --dataset_repeats=1
    --resolution="1024,1024"
    --enable_bucket
    --bucket_reso_steps=64
    --min_bucket_reso=256
    --max_bucket_reso=2048
    --flip_aug
    --shuffle_caption
    --cache_latents
    --cache_latents_to_disk
    --max_data_loader_n_workers=8
    --persistent_data_loader_workers
    # Network config
    # NOTE(review): dim/conv_dim=100000 presumably means "no cap" for LoKr —
    # confirm against the LyCORIS docs.
    --network_dim=100000
    # ⚠️ TODO: Plot
    --network_alpha=64
    --network_module="lycoris.kohya"
    --network_args
        "preset=full"
        "conv_dim=100000"
        "decompose_both=False"
        "conv_alpha=64"
        "rank_dropout=0"
        "module_dropout=0"
        "use_tucker=False"
        "use_scalar=False"
        "rank_dropout_scale=False"
        "algo=lokr"
        "bypass_mode=False"
        "factor=16"
        "use_cp=True"
        "dora_wd=True"
        "train_norm=False"
    --network_dropout=0
    # Optimizer config
    --optimizer_type=FCompass
    --train_batch_size=8
    --gradient_accumulation_steps=6
    --max_grad_norm=1
    --gradient_checkpointing
    --lr_warmup_steps=0
    #--scale_weight_norms=1
    # LR Scheduling
    --max_train_steps="$STEPS"
    --learning_rate=0.0005
    --unet_lr=0.0002
    --text_encoder_lr=0.0001
    --lr_scheduler="cosine"
    --lr_scheduler_args="num_cycles=0.375"
    # Noise
    --multires_noise_iterations=12
    --multires_noise_discount=0.4
    #--min_snr_gamma=1
    # Optimization, details
    --no_half_vae
    --sdpa
    --mixed_precision="bf16"
    # Saving
    --save_model_as="safetensors"
    --save_precision="fp16"
    --save_every_n_steps=100
    # Saving States
    #--save_state
    # Either resume from a saved state
    #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
    #--skip_until_initial_step
    # Or from a checkpoint
    #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
    #--initial_step=120
    # Sampling
    --sample_every_n_steps=100
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_sampler="euler_a"
    --sample_at_first
    --caption_extension=".txt"
)

# Abort if the repo checkout is missing rather than launching python from the
# wrong working directory.
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-by_darkgem-lokr+factor+lowerLR ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# SDXL (Pony Diffusion V6 XL) LoKr training launcher for the by_darkgem
# dataset (v5; factor=16, Tucker decomposition, lower learning rate), using
# kohya's sd-scripts with a LyCORIS network. Trailing digits of NAME select
# the step count.
#
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

NAME="by_darkgem-v5s2400"
TRAINING_DIR="/home/kade/datasets/by_darkgem"
OUTPUT_DIR="/home/kade/output_dir"

# Extract the number of steps from the NAME (trailing digits, e.g. "...s2400").
STEPS=$(grep -oE '[0-9]+$' <<< "$NAME")

# If no number is found at the end of NAME, set a default value
if [ -z "$STEPS" ]; then
    STEPS=4096
    echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
else
    echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
fi

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
    # ⚠️ TODO: Benchmark...
    --debiased_estimation_loss
    # ⚠️ TODO: What does this do? Does it even work?
    --max_token_length=225
    # Keep Tokens
    --keep_tokens=1
    --keep_tokens_separator="|||"
    # Model
    --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
    # Output, logging
    --output_dir="$OUTPUT_DIR/$NAME"
    --output_name="$NAME"
    --log_prefix="$NAME-"
    --log_with=tensorboard
    --logging_dir="$OUTPUT_DIR/logs"
    --seed=1728871242
    # Dataset
    --train_data_dir="$TRAINING_DIR"
    --dataset_repeats=1
    --resolution="1024,1024"
    --enable_bucket
    --bucket_reso_steps=64
    --min_bucket_reso=256
    --max_bucket_reso=2048
    --flip_aug
    --shuffle_caption
    --cache_latents
    --cache_latents_to_disk
    --max_data_loader_n_workers=8
    --persistent_data_loader_workers
    # Network config
    # NOTE(review): dim/conv_dim=100000 presumably means "no cap" for LoKr —
    # confirm against the LyCORIS docs.
    --network_dim=100000
    # ⚠️ TODO: Plot
    --network_alpha=64
    --network_module="lycoris.kohya"
    --network_args
        "preset=full"
        "conv_dim=100000"
        "decompose_both=False"
        "conv_alpha=64"
        "rank_dropout=0"
        "module_dropout=0"
        "use_tucker=True"
        "use_scalar=False"
        "rank_dropout_scale=False"
        "algo=lokr"
        "bypass_mode=False"
        "factor=16"
        "dora_wd=True"
        "train_norm=False"
    --network_dropout=0
    # Optimizer config
    --optimizer_type=FCompass
    --train_batch_size=8
    --gradient_accumulation_steps=2
    --max_grad_norm=1
    --gradient_checkpointing
    #--scale_weight_norms=1
    # LR Scheduling
    --max_train_steps="$STEPS"
    --lr_warmup_steps=0
    --learning_rate=0.0002
    --unet_lr=0.0002
    --text_encoder_lr=0.0001
    --lr_scheduler="cosine"
    --lr_scheduler_args="num_cycles=0.375"
    # Noise
    --multires_noise_iterations=12
    --multires_noise_discount=0.4
    #--min_snr_gamma=1
    # Optimization, details
    --no_half_vae
    --sdpa
    --mixed_precision="bf16"
    # Saving
    --save_model_as="safetensors"
    --save_precision="fp16"
    --save_every_n_steps=100
    # Saving States
    #--save_state
    # Either resume from a saved state
    #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
    #--skip_until_initial_step
    # Or from a checkpoint
    #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
    #--initial_step=120
    # Sampling
    --sample_every_n_steps=100
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_sampler="euler_a"
    --sample_at_first
    --caption_extension=".txt"
)

# Abort if the repo checkout is missing rather than launching python from the
# wrong working directory.
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-by_darkgem-lokr+factor+lowerLR_lowres ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# SDXL (Pony Diffusion V6 XL) LoKr training launcher for the by_darkgem
# dataset — low-resolution variant (512px buckets, batch 48). Uses kohya's
# sd-scripts with a LyCORIS network. Trailing digits of NAME select the
# step count.
#
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

NAME="by_darkgem-512b48-v5s800"
TRAINING_DIR="/home/kade/datasets/by_darkgem"
OUTPUT_DIR="/home/kade/output_dir"

# Extract the number of steps from the NAME (trailing digits, e.g. "...s800").
STEPS=$(grep -oE '[0-9]+$' <<< "$NAME")

# If no number is found at the end of NAME, set a default value
if [ -z "$STEPS" ]; then
    STEPS=4096
    echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
else
    echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
fi

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
    # ⚠️ TODO: Benchmark...
    --debiased_estimation_loss
    # ⚠️ TODO: What does this do? Does it even work?
    --max_token_length=225
    # Keep Tokens
    --keep_tokens=1
    --keep_tokens_separator="|||"
    # Model
    --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
    # Output, logging
    --output_dir="$OUTPUT_DIR/$NAME"
    --output_name="$NAME"
    --log_prefix="$NAME-"
    --log_with=tensorboard
    --logging_dir="$OUTPUT_DIR/logs"
    --seed=1728871242
    # Dataset
    --train_data_dir="$TRAINING_DIR"
    --dataset_repeats=1
    --resolution="512,512"
    --enable_bucket
    --bucket_reso_steps=64
    --min_bucket_reso=256
    --max_bucket_reso=1024
    --flip_aug
    --shuffle_caption
    --cache_latents
    --cache_latents_to_disk
    --max_data_loader_n_workers=8
    --persistent_data_loader_workers
    # Network config
    # NOTE(review): dim/conv_dim=100000 presumably means "no cap" for LoKr —
    # confirm against the LyCORIS docs.
    --network_dim=100000
    # ⚠️ TODO: Plot
    --network_alpha=64
    --network_module="lycoris.kohya"
    --network_args
        "preset=full"
        "conv_dim=100000"
        "decompose_both=False"
        "conv_alpha=64"
        "rank_dropout=0"
        "module_dropout=0"
        "use_scalar=False"
        "rank_dropout_scale=False"
        "algo=lokr"
        "bypass_mode=False"
        "factor=16"
        "use_tucker=True"
        "dora_wd=True"
        "train_norm=False"
    --network_dropout=0
    # Optimizer config
    --optimizer_type=FCompass
    --train_batch_size=48
    #--gradient_accumulation_steps=1
    --max_grad_norm=1
    --gradient_checkpointing
    #--scale_weight_norms=1
    # LR Scheduling
    --max_train_steps="$STEPS"
    --lr_warmup_steps=0
    --learning_rate=0.0003461
    --unet_lr=0.0002
    --text_encoder_lr=0.0001
    --lr_scheduler="cosine"
    --lr_scheduler_args="num_cycles=0.375"
    # Noise
    --multires_noise_iterations=12
    --multires_noise_discount=0.4
    #--min_snr_gamma=1
    # Optimization, details
    --no_half_vae
    --sdpa
    --mixed_precision="bf16"
    # Saving
    --save_model_as="safetensors"
    --save_precision="fp16"
    --save_every_n_steps=100
    # Saving States
    #--save_state
    # Either resume from a saved state
    #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
    #--skip_until_initial_step
    # Or from a checkpoint
    #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
    #--initial_step=120
    # Sampling
    --sample_every_n_steps=100
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_sampler="euler_a"
    --sample_at_first
    --caption_extension=".txt"
)

# Abort if the repo checkout is missing rather than launching python from the
# wrong working directory.
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-by_hamgas-lokr ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env zsh
2
+
3
+ # >>> conda initialize >>>
4
+ # !! Contents within this block are managed by 'conda init' !!
5
+ __conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
6
+ if [ $? -eq 0 ]; then
7
+ eval "$__conda_setup"
8
+ else
9
+ if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
10
+ . "/home/kade/miniconda3/etc/profile.d/conda.sh"
11
+ else
12
+ export PATH="/home/kade/miniconda3/bin:$PATH"
13
+ fi
14
+ fi
15
+ unset __conda_setup
16
+ # <<< conda initialize <<<
17
+
18
+ conda activate sdscripts
19
+
20
+ NAME="by_hamgas-v3s1200"
21
+ TRAINING_DIR="/home/kade/datasets/by_hamgas"
22
+ OUTPUT_DIR="/home/kade/output_dir"
23
+
24
+ # alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
25
+ # --min_snr_gamma=1
26
+ args=(
27
+ # ⚠️ TODO: Benchmark...
28
+ --debiased_estimation_loss
29
+ # ⚠️ TODO: What does this do? Does it even work?
30
+ --max_token_length=225
31
+ # Keep Tokens
32
+ --keep_tokens=1
33
+ --keep_tokens_separator="|||"
34
+ # Model
35
+ --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
36
+ # Output, logging
37
+ --output_dir="$OUTPUT_DIR/$NAME"
38
+ --output_name="$NAME"
39
+ --log_prefix="$NAME-"
40
+ --log_with=tensorboard
41
+ --logging_dir="$OUTPUT_DIR/logs"
42
+ --seed=1728871242
43
+ # Dataset
44
+ --train_data_dir="$TRAINING_DIR"
45
+ --dataset_repeats=1
46
+ --resolution="1024,1024"
47
+ --enable_bucket
48
+ --bucket_reso_steps=64
49
+ --min_bucket_reso=256
50
+ --max_bucket_reso=2048
51
+ --flip_aug
52
+ --shuffle_caption
53
+ --cache_latents
54
+ --cache_latents_to_disk
55
+ --max_data_loader_n_workers=8
56
+ --persistent_data_loader_workers
57
+ # Network config
58
+ --network_dim=100000
59
+ # ⚠️ TODO: Plot
60
+ --network_alpha=64
61
+ --network_module="lycoris.kohya"
62
+ --network_args
63
+ "preset=full"
64
+ "conv_dim=100000"
65
+ "decompose_both=False"
66
+ "conv_alpha=64"
67
+ "rank_dropout=0"
68
+ "module_dropout=0"
69
+ "use_tucker=False"
70
+ "use_scalar=False"
71
+ "rank_dropout_scale=False"
72
+ "algo=lokr"
73
+ "bypass_mode=False"
74
+ "factor=32"
75
+ "use_cp=True"
76
+ "dora_wd=True"
77
+ "train_norm=False"
78
+ --network_dropout=0
79
+ # Optimizer config
80
+ --optimizer_type=FCompass
81
+ --train_batch_size=8
82
+ --gradient_accumulation_steps=6
83
+ --max_grad_norm=1
84
+ --gradient_checkpointing
85
+ --lr_warmup_steps=0
86
+ #--scale_weight_norms=1
87
+ # LR Scheduling
88
+ --max_train_steps=4096
89
+ --learning_rate=0.0005
90
+ --unet_lr=0.0002
91
+ --text_encoder_lr=0.0001
92
+ --lr_scheduler="cosine"
93
+ --lr_scheduler_args="num_cycles=0.375"
94
+ # Noise
95
+ --multires_noise_iterations=12
96
+ --multires_noise_discount=0.4
97
+ #--min_snr_gamma=1
98
+ # Optimization, details
99
+ --no_half_vae
100
+ --sdpa
101
+ --mixed_precision="bf16"
102
+ # Saving
103
+ --save_model_as="safetensors"
104
+ --save_precision="fp16"
105
+ --save_every_n_steps=100
106
+ # Saving States
107
+ #--save_state
108
+ # Either resume from a saved state
109
+ #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
110
+ #--skip_until_initial_step
111
+ # Or from a checkpoint
112
+ #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
113
+ #--initial_step=120
114
+ # Sampling
115
+ --sample_every_n_steps=100
116
+ --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
117
+ --sample_sampler="euler_a"
118
+ --caption_extension=".txt"
119
+ )
120
+
121
+ cd ~/source/repos/sd-scripts
122
+ #accelerate launch --num_cpu_threads_per_process=2
123
+ python "./sdxl_train_network.py" "${args[@]}"
124
+ cd ~
125
+
training_scripts/tp-by_jinxit-lokr ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env zsh
2
+ #
3
+ # NOTE:
4
+ # - Changed the compression factor of the LoKr.
5
+ #
6
+ # >>> conda initialize >>>
7
+ # !! Contents within this block are managed by 'conda init' !!
8
+ __conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
9
+ if [ $? -eq 0 ]; then
10
+ eval "$__conda_setup"
11
+ else
12
+ if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
13
+ . "/home/kade/miniconda3/etc/profile.d/conda.sh"
14
+ else
15
+ export PATH="/home/kade/miniconda3/bin:$PATH"
16
+ fi
17
+ fi
18
+ unset __conda_setup
19
+ # <<< conda initialize <
20
+
21
+ conda activate sdscripts
22
+
23
+ NAME="by_jinxit-v3s6400"
24
+ TRAINING_DIR="/home/kade/datasets/by_jinxit"
25
+ OUTPUT_DIR="/home/kade/output_dir"
26
+
27
+ # Extract the number of steps from the NAME
28
+ STEPS=$(echo $NAME | grep -oE '[0-9]+$')
29
+
30
+ # If no number is found at the end of NAME, set a default value
31
+ if [ -z "$STEPS" ]; then
32
+ STEPS=4096
33
+ echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
34
+ else
35
+ echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
36
+ fi
37
+
38
+ # alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
39
+ # --min_snr_gamma=1
40
+ args=(
41
+ # ⚠️ TODO: Benchmark...
42
+ --debiased_estimation_loss
43
+ # ⚠️ TODO: What does this do? Does it even work?
44
+ --max_token_length=225
45
+ # Keep Tokens
46
+ --keep_tokens=1
47
+ --keep_tokens_separator="|||"
48
+ # Model
49
+ --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
50
+ # Output, logging
51
+ --output_dir="$OUTPUT_DIR/$NAME"
52
+ --output_name="$NAME"
53
+ --log_prefix="$NAME-"
54
+ --log_with=tensorboard
55
+ --logging_dir="$OUTPUT_DIR/logs"
56
+ --seed=1728871242
57
+ # Dataset
58
+ --train_data_dir="$TRAINING_DIR"
59
+ --dataset_repeats=1
60
+ --resolution="1024,1024"
61
+ --enable_bucket
62
+ --bucket_reso_steps=64
63
+ --min_bucket_reso=256
64
+ --max_bucket_reso=2048
65
+ --flip_aug
66
+ --shuffle_caption
67
+ --cache_latents
68
+ --cache_latents_to_disk
69
+ --max_data_loader_n_workers=8
70
+ --persistent_data_loader_workers
71
+ # Network config
72
+ --network_dim=100000
73
+ # ⚠️ TODO: Plot
74
+ --network_alpha=64
75
+ --network_module="lycoris.kohya"
76
+ --network_args
77
+ "preset=full"
78
+ "conv_dim=100000"
79
+ "decompose_both=False"
80
+ "conv_alpha=64"
81
+ "rank_dropout=0"
82
+ "module_dropout=0"
83
+ "use_tucker=False"
84
+ "use_scalar=False"
85
+ "rank_dropout_scale=False"
86
+ "algo=lokr"
87
+ "bypass_mode=False"
88
+ "factor=16"
89
+ "use_cp=True"
90
+ "dora_wd=True"
91
+ "train_norm=False"
92
+ --network_dropout=0
93
+ # Optimizer config
94
+ --optimizer_type=FCompass
95
+ --train_batch_size=8
96
+ --gradient_accumulation_steps=6
97
+ --max_grad_norm=1
98
+ --gradient_checkpointing
99
+ --lr_warmup_steps=0
100
+ #--scale_weight_norms=1
101
+ # LR Scheduling
102
+ --max_train_steps=$STEPS
103
+ --learning_rate=0.0005
104
+ --unet_lr=0.0002
105
+ --text_encoder_lr=0.0001
106
+ --lr_scheduler="cosine"
107
+ --lr_scheduler_args="num_cycles=0.375"
108
+ # Noise
109
+ --multires_noise_iterations=12
110
+ --multires_noise_discount=0.4
111
+ #--min_snr_gamma=1
112
+ # Optimization, details
113
+ --no_half_vae
114
+ --sdpa
115
+ --mixed_precision="bf16"
116
+ # Saving
117
+ --save_model_as="safetensors"
118
+ --save_precision="fp16"
119
+ --save_every_n_steps=100
120
+ # Saving States
121
+ #--save_state
122
+ # Either resume from a saved state
123
+ #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
124
+ #--skip_until_initial_step
125
+ # Or from a checkpoint
126
+ #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
127
+ #--initial_step=120
128
+ # Sampling
129
+ --sample_every_n_steps=200
130
+ --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
131
+ --sample_sampler="euler_a"
132
+ --caption_extension=".txt"
133
+ )
134
+ cd ~/source/repos/sd-scripts
135
+ #accelerate launch --num_cpu_threads_per_process=2
136
+ python "./sdxl_train_network.py" "${args[@]}"
137
+ cd ~
training_scripts/tp-by_kousen ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env zsh
2
+
3
+ # >>> conda initialize >>>
4
+ # !! Contents within this block are managed by 'conda init' !!
5
+ __conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
6
+ if [ $? -eq 0 ]; then
7
+ eval "$__conda_setup"
8
+ else
9
+ if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
10
+ . "/home/kade/miniconda3/etc/profile.d/conda.sh"
11
+ else
12
+ export PATH="/home/kade/miniconda3/bin:$PATH"
13
+ fi
14
+ fi
15
+ unset __conda_setup
16
+ # <<< conda initialize <<<
17
+
18
+ conda activate sdscripts
19
+
20
+ NAME="by_kousen-v1s400"
21
+ TRAINING_DIR="/home/kade/datasets/by_kousen"
22
+ OUTPUT_DIR="/home/kade/output_dir"
23
+
24
+ # alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
25
+ # --min_snr_gamma=1
26
+ args=(
27
+ # Keep Tokens
28
+ --keep_tokens=1
29
+ --keep_tokens_separator="|||"
30
+ # Model
31
+ --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
32
+ # Output, logging
33
+ --output_dir="$OUTPUT_DIR/$NAME"
34
+ --output_name="$NAME"
35
+ --log_prefix="$NAME-"
36
+ --log_with=tensorboard
37
+ --logging_dir="$OUTPUT_DIR/logs"
38
+ --seed=1728871242
39
+ # Dataset
40
+ --train_data_dir="$TRAINING_DIR"
41
+ --dataset_repeats=1
42
+ --resolution="1024,1024"
43
+ --enable_bucket
44
+ --bucket_reso_steps=64
45
+ --min_bucket_reso=256
46
+ --max_bucket_reso=2048
47
+ --flip_aug
48
+ --shuffle_caption
49
+ --cache_latents
50
+ --cache_latents_to_disk
51
+ --max_data_loader_n_workers=8
52
+ --persistent_data_loader_workers
53
+ # Network config
54
+ --network_dim=8
55
+ --network_alpha=4
56
+ --network_module="lycoris.kohya"
57
+ --network_args
58
+ "preset=full"
59
+ "conv_dim=256"
60
+ "conv_alpha=4"
61
+ "rank_dropout=0"
62
+ "module_dropout=0"
63
+ "use_tucker=False"
64
+ "use_scalar=False"
65
+ "rank_dropout_scale=False"
66
+ "algo=locon"
67
+ "dora_wd=False"
68
+ "train_norm=False"
69
+ --network_dropout=0
70
+ # Optimizer config
71
+ --optimizer_type=FCompass
72
+ --train_batch_size=8
73
+ --gradient_accumulation_steps=6
74
+ --max_grad_norm=1
75
+ --gradient_checkpointing
76
+ #--lr_warmup_steps=6
77
+ #--scale_weight_norms=1
78
+ # LR Scheduling
79
+ --max_train_steps=400
80
+ --learning_rate=0.0002
81
+ --unet_lr=0.0002
82
+ --text_encoder_lr=0.0001
83
+ --lr_scheduler="cosine"
84
+ --lr_scheduler_args="num_cycles=0.375"
85
+ # Noise
86
+ --multires_noise_iterations=12
87
+ --multires_noise_discount=0.4
88
+ #--min_snr_gamma=1
89
+ # Optimization, details
90
+ --no_half_vae
91
+ --sdpa
92
+ --mixed_precision="bf16"
93
+ # Saving
94
+ --save_model_as="safetensors"
95
+ --save_precision="fp16"
96
+ --save_every_n_steps=100
97
+ # Saving States
98
+ #--save_state
99
+ # Either resume from a saved state
100
+ #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
101
+ #--skip_until_initial_step
102
+ # Or from a checkpoint
103
+ #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
104
+ #--initial_step=120
105
+ # Sampling
106
+ --sample_every_n_steps=100
107
+ --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
108
+ --sample_sampler="euler_a"
109
+ --caption_extension=".txt"
110
+ )
111
+
112
+ cd ~/source/repos/sd-scripts
113
+ #accelerate launch --num_cpu_threads_per_process=2
114
+ python "./sdxl_train_network.py" "${args[@]}"
115
+ cd ~
116
+
training_scripts/tp-by_kunaboto ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env zsh
2
+
3
+ conda activate sdscripts
4
+
5
+ NAME="by_kunaboto-v1s600"
6
+ TRAINING_DIR="/home/kade/training/by_kunaboto"
7
+ OUTPUT_DIR="/home/kade/output_dir"
8
+
9
+ # alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
10
+ # --min_snr_gamma=1
11
+ args=(
12
+ --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
13
+ # Output, logging
14
+ --output_dir="$OUTPUT_DIR/$NAME"
15
+ --output_name="$NAME"
16
+ --log_prefix="$NAME-"
17
+ --log_with=tensorboard
18
+ --logging_dir="$OUTPUT_DIR/logs"
19
+ --seed=1728871242
20
+ # Dataset
21
+ --train_data_dir="$TRAINING_DIR"
22
+ --dataset_repeats=1
23
+ --resolution="1024,1024"
24
+ --enable_bucket
25
+ --bucket_reso_steps=32
26
+ --min_bucket_reso=256
27
+ --max_bucket_reso=2048
28
+ --flip_aug
29
+ --shuffle_caption
30
+ --cache_latents
31
+ --cache_latents_to_disk
32
+ --max_data_loader_n_workers=8
33
+ --persistent_data_loader_workers
34
+ # Network config
35
+ --network_dim=8
36
+ --network_alpha=4
37
+ --network_module="lycoris.kohya"
38
+ --network_args
39
+ "preset=full"
40
+ "conv_dim=256"
41
+ "conv_alpha=4"
42
+ "rank_dropout=0"
43
+ "module_dropout=0"
44
+ "use_tucker=False"
45
+ "use_scalar=False"
46
+ "rank_dropout_scale=False"
47
+ "algo=locon"
48
+ "dora_wd=False"
49
+ "train_norm=False"
50
+ --network_dropout=0
51
+ # Optimizer config
52
+ --optimizer_type=FCompass
53
+ --train_batch_size=8
54
+ --gradient_accumulation_steps=6
55
+ --max_grad_norm=1
56
+ --gradient_checkpointing
57
+ #--lr_warmup_steps=6
58
+ #--scale_weight_norms=1
59
+ # LR Scheduling
60
+ --max_train_steps=600
61
+ --learning_rate=0.0002
62
+ --unet_lr=0.0002
63
+ --text_encoder_lr=0.0001
64
+ --lr_scheduler="cosine"
65
+ --lr_scheduler_args="num_cycles=0.375"
66
+ # Noise
67
+ --multires_noise_iterations=12
68
+ --multires_noise_discount=0.4
69
+ #--min_snr_gamma=1
70
+ # Optimization, details
71
+ --no_half_vae
72
+ --sdpa
73
+ --mixed_precision="bf16"
74
+ # Saving
75
+ --save_model_as="safetensors"
76
+ --save_precision="fp16"
77
+ --save_every_n_steps=100
78
+ # Saving States
79
+ #--save_state
80
+ # Either resume from a saved state
81
+ #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
82
+ #--skip_until_initial_step
83
+ # Or from a checkpoint
84
+ #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
85
+ #--initial_step=120
86
+ # Sampling
87
+ --sample_every_n_steps=100
88
+ --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
89
+ --sample_sampler="euler_a"
90
+ --caption_extension=".txt"
91
+ )
92
+
93
+ cd ~/source/repos/sd-scripts
94
+ #accelerate launch --num_cpu_threads_per_process=2
95
+ python "./sdxl_train_network.py" "${args[@]}"
96
+ cd ~
97
+
training_scripts/tp-by_latrans-lokr ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env zsh
2
+ #
3
+ # >>> conda initialize >>>
4
+ # !! Contents within this block are managed by 'conda init' !!
5
+ __conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
6
+ if [ $? -eq 0 ]; then
7
+ eval "$__conda_setup"
8
+ else
9
+ if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
10
+ . "/home/kade/miniconda3/etc/profile.d/conda.sh"
11
+ else
12
+ export PATH="/home/kade/miniconda3/bin:$PATH"
13
+ fi
14
+ fi
15
+ unset __conda_setup
16
+ # <<< conda initialize <
17
+
18
+ conda activate sdscripts
19
+
20
+ NAME="by_latrans-v3s1200"
21
+ TRAINING_DIR="/home/kade/datasets/by_latrans"
22
+ OUTPUT_DIR="/home/kade/output_dir"
23
+
24
+ # Extract the number of steps from the NAME
25
+ STEPS=$(echo $NAME | grep -oE '[0-9]+$')
26
+
27
+ # If no number is found at the end of NAME, set a default value
28
+ if [ -z "$STEPS" ]; then
29
+ STEPS=4096
30
+ echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
31
+ else
32
+ echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
33
+ fi
34
+
35
+ # alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
36
+ # --min_snr_gamma=1
37
+ args=(
38
+ # ⚠️ TODO: Benchmark...
39
+ --debiased_estimation_loss
40
+ # ⚠️ TODO: What does this do? Does it even work?
41
+ --max_token_length=225
42
+ # Keep Tokens
43
+ --keep_tokens=1
44
+ --keep_tokens_separator="|||"
45
+ # Model
46
+ --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
47
+ # Output, logging
48
+ --output_dir="$OUTPUT_DIR/$NAME"
49
+ --output_name="$NAME"
50
+ --log_prefix="$NAME-"
51
+ --log_with=tensorboard
52
+ --logging_dir="$OUTPUT_DIR/logs"
53
+ --seed=1728871242
54
+ # Dataset
55
+ --train_data_dir="$TRAINING_DIR"
56
+ --dataset_repeats=1
57
+ --resolution="1024,1024"
58
+ --enable_bucket
59
+ --bucket_reso_steps=64
60
+ --min_bucket_reso=256
61
+ --max_bucket_reso=2048
62
+ --flip_aug
63
+ --shuffle_caption
64
+ --cache_latents
65
+ --cache_latents_to_disk
66
+ --max_data_loader_n_workers=8
67
+ --persistent_data_loader_workers
68
+ # Network config
69
+ --network_dim=100000
70
+ # ⚠️ TODO: Plot
71
+ --network_alpha=64
72
+ --network_module="lycoris.kohya"
73
+ --network_args
74
+ "preset=full"
75
+ "conv_dim=100000"
76
+ "decompose_both=False"
77
+ "conv_alpha=64"
78
+ "rank_dropout=0"
79
+ "module_dropout=0"
80
+ "use_tucker=False"
81
+ "use_scalar=False"
82
+ "rank_dropout_scale=False"
83
+ "algo=lokr"
84
+ "bypass_mode=False"
85
+ "factor=32"
86
+ "use_cp=True"
87
+ "dora_wd=True"
88
+ "train_norm=False"
89
+ --network_dropout=0
90
+ # Optimizer config
91
+ --optimizer_type=FCompass
92
+ --train_batch_size=8
93
+ --gradient_accumulation_steps=6
94
+ --max_grad_norm=1
95
+ --gradient_checkpointing
96
+ --lr_warmup_steps=0
97
+ #--scale_weight_norms=1
98
+ # LR Scheduling
99
+ --max_train_steps=$STEPS
100
+ --learning_rate=0.0005
101
+ --unet_lr=0.0002
102
+ --text_encoder_lr=0.0001
103
+ --lr_scheduler="cosine"
104
+ --lr_scheduler_args="num_cycles=0.375"
105
+ # Noise
106
+ --multires_noise_iterations=12
107
+ --multires_noise_discount=0.4
108
+ #--min_snr_gamma=1
109
+ # Optimization, details
110
+ --no_half_vae
111
+ --sdpa
112
+ --mixed_precision="bf16"
113
+ # Saving
114
+ --save_model_as="safetensors"
115
+ --save_precision="fp16"
116
+ --save_every_n_steps=100
117
+ # Saving States
118
+ #--save_state
119
+ # Either resume from a saved state
120
+ #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
121
+ #--skip_until_initial_step
122
+ # Or from a checkpoint
123
+ #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
124
+ #--initial_step=120
125
+ # Sampling
126
+ --sample_every_n_steps=100
127
+ --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
128
+ --sample_sampler="euler_a"
129
+ --caption_extension=".txt"
130
+ )
131
+ cd ~/source/repos/sd-scripts
132
+ #accelerate launch --num_cpu_threads_per_process=2
133
+ python "./sdxl_train_network.py" "${args[@]}"
134
+ cd ~
training_scripts/tp-by_melak ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env zsh
2
+
3
+ # >>> conda initialize >>>
4
+ # !! Contents within this block are managed by 'conda init' !!
5
+ __conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
6
+ if [ $? -eq 0 ]; then
7
+ eval "$__conda_setup"
8
+ else
9
+ if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
10
+ . "/home/kade/miniconda3/etc/profile.d/conda.sh"
11
+ else
12
+ export PATH="/home/kade/miniconda3/bin:$PATH"
13
+ fi
14
+ fi
15
+ unset __conda_setup
16
+ # <<< conda initialize <<<
17
+
18
+ conda activate sdscripts
19
+
20
+ NAME="by_melak-v1s400"
21
+ TRAINING_DIR="/home/kade/datasets/by_melak"
22
+ OUTPUT_DIR="/home/kade/output_dir"
23
+
24
+ # alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
25
+ # --min_snr_gamma=1
26
+ args=(
27
+ # Keep Tokens
28
+ --keep_tokens=1
29
+ --keep_tokens_separator="|||"
30
+ # Model
31
+ --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
32
+ # Output, logging
33
+ --output_dir="$OUTPUT_DIR/$NAME"
34
+ --output_name="$NAME"
35
+ --log_prefix="$NAME-"
36
+ --log_with=tensorboard
37
+ --logging_dir="$OUTPUT_DIR/logs"
38
+ --seed=1728871242
39
+ # Dataset
40
+ --train_data_dir="$TRAINING_DIR"
41
+ --dataset_repeats=1
42
+ --resolution="1024,1024"
43
+ --enable_bucket
44
+ --bucket_reso_steps=32
45
+ --min_bucket_reso=256
46
+ --max_bucket_reso=2048
47
+ --flip_aug
48
+ --shuffle_caption
49
+ --cache_latents
50
+ --cache_latents_to_disk
51
+ --max_data_loader_n_workers=8
52
+ --persistent_data_loader_workers
53
+ # Network config
54
+ --network_dim=8
55
+ --network_alpha=4
56
+ --network_module="lycoris.kohya"
57
+ --network_args
58
+ "preset=full"
59
+ "conv_dim=256"
60
+ "conv_alpha=4"
61
+ "rank_dropout=0"
62
+ "module_dropout=0"
63
+ "use_tucker=False"
64
+ "use_scalar=False"
65
+ "rank_dropout_scale=False"
66
+ "algo=locon"
67
+ "dora_wd=False"
68
+ "train_norm=False"
69
+ --network_dropout=0
70
+ # Optimizer config
71
+ --optimizer_type=FCompass
72
+ --train_batch_size=8
73
+ --gradient_accumulation_steps=6
74
+ --max_grad_norm=1
75
+ --gradient_checkpointing
76
+ #--lr_warmup_steps=6
77
+ #--scale_weight_norms=1
78
+ # LR Scheduling
79
+ --max_train_steps=400
80
+ --learning_rate=0.0002
81
+ --unet_lr=0.0002
82
+ --text_encoder_lr=0.0001
83
+ --lr_scheduler="cosine"
84
+ --lr_scheduler_args="num_cycles=0.375"
85
+ # Noise
86
+ --multires_noise_iterations=12
87
+ --multires_noise_discount=0.4
88
+ #--min_snr_gamma=1
89
+ # Optimization, details
90
+ --no_half_vae
91
+ --sdpa
92
+ --mixed_precision="bf16"
93
+ # Saving
94
+ --save_model_as="safetensors"
95
+ --save_precision="fp16"
96
+ --save_every_n_steps=100
97
+ # Saving States
98
+ #--save_state
99
+ # Either resume from a saved state
100
+ #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
101
+ #--skip_until_initial_step
102
+ # Or from a checkpoint
103
+ #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
104
+ #--initial_step=120
105
+ # Sampling
106
+ --sample_every_n_steps=100
107
+ --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
108
+ --sample_sampler="euler_a"
109
+ --caption_extension=".txt"
110
+ )
111
+
112
+ cd ~/source/repos/sd-scripts
113
+ #accelerate launch --num_cpu_threads_per_process=2
114
+ python "./sdxl_train_network.py" "${args[@]}"
115
+ cd ~
116
+
training_scripts/tp-by_randitawu ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env zsh
2
+
3
+ # >>> conda initialize >>>
4
+ # !! Contents within this block are managed by 'conda init' !!
5
+ __conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
6
+ if [ $? -eq 0 ]; then
7
+ eval "$__conda_setup"
8
+ else
9
+ if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
10
+ . "/home/kade/miniconda3/etc/profile.d/conda.sh"
11
+ else
12
+ export PATH="/home/kade/miniconda3/bin:$PATH"
13
+ fi
14
+ fi
15
+ unset __conda_setup
16
+ # <<< conda initialize <<<
17
+
18
+ conda activate sdscripts
19
+
20
+ NAME="by_randitawu-v1s400"
21
+ TRAINING_DIR="/home/kade/datasets/by_randitawu"
22
+ OUTPUT_DIR="/home/kade/output_dir"
23
+
24
+ # alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
25
+ # --min_snr_gamma=1
26
+ args=(
27
+ # Model
28
+ --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
29
+ # Keep Tokens
30
+ --keep_tokens=1
31
+ --keep_tokens_separator="|||"
32
+ # Output, logging
33
+ --output_dir="$OUTPUT_DIR/$NAME"
34
+ --output_name="$NAME"
35
+ --log_prefix="$NAME-"
36
+ --log_with=tensorboard
37
+ --logging_dir="$OUTPUT_DIR/logs"
38
+ --seed=1728871242
39
+ # Dataset
40
+ --train_data_dir="$TRAINING_DIR"
41
+ --dataset_repeats=1
42
+ --resolution="1024,1024"
43
+ --enable_bucket
44
+ --bucket_reso_steps=32
45
+ --min_bucket_reso=256
46
+ --max_bucket_reso=2048
47
+ --flip_aug
48
+ --shuffle_caption
49
+ --cache_latents
50
+ --cache_latents_to_disk
51
+ --max_data_loader_n_workers=8
52
+ --persistent_data_loader_workers
53
+ # Network config
54
+ --network_dim=8
55
+ --network_alpha=4
56
+ --network_module="lycoris.kohya"
57
+ --network_args
58
+ "preset=full"
59
+ "conv_dim=64"
60
+ "conv_alpha=2"
61
+ "rank_dropout=0"
62
+ "module_dropout=0"
63
+ "use_tucker=False"
64
+ "use_scalar=False"
65
+ "rank_dropout_scale=False"
66
+ "algo=lokr"
67
+ "dora_wd=True"
68
+ "train_norm=False"
69
+ --network_dropout=0
70
+ # Optimizer config
71
+ --optimizer_type=FCompass
72
+ --train_batch_size=12
73
+ --gradient_accumulation_steps=4
74
+ --max_grad_norm=1
75
+ --gradient_checkpointing
76
+ #--lr_warmup_steps=6
77
+ #--scale_weight_norms=1
78
+ # LR Scheduling
79
+ --max_train_steps=400
80
+ --learning_rate=0.0002
81
+ --unet_lr=0.0002
82
+ --text_encoder_lr=0.0001
83
+ --lr_scheduler="cosine"
84
+ --lr_scheduler_args="num_cycles=0.375"
85
+ # Noise
86
+ #--multires_noise_iterations=12
87
+ #--multires_noise_discount=0.4
88
+ #--min_snr_gamma=1
89
+ # Optimization, details
90
+ --no_half_vae
91
+ --sdpa
92
+ --mixed_precision="bf16"
93
+ # Saving
94
+ --save_model_as="safetensors"
95
+ --save_precision="fp16"
96
+ --save_every_n_steps=100
97
+ # Saving States
98
+ #--save_state
99
+ # Either resume from a saved state
100
+ #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
101
+ #--skip_until_initial_step
102
+ # Or from a checkpoint
103
+ #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
104
+ #--initial_step=120
105
+ # Sampling
106
+ --sample_every_n_steps=100
107
+ --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
108
+ --sample_sampler="euler_a"
109
+ --caption_extension=".txt"
110
+ )
111
+
112
+ cd ~/source/repos/sd-scripts
113
+ #accelerate launch --num_cpu_threads_per_process=2
114
+ python "./sdxl_train_network.py" "${args[@]}"
115
+ cd ~
116
+
training_scripts/tp-by_randitawu_normal ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env zsh
2
+
3
+ # >>> conda initialize >>>
4
+ # !! Contents within this block are managed by 'conda init' !!
5
+ __conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
6
+ if [ $? -eq 0 ]; then
7
+ eval "$__conda_setup"
8
+ else
9
+ if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
10
+ . "/home/kade/miniconda3/etc/profile.d/conda.sh"
11
+ else
12
+ export PATH="/home/kade/miniconda3/bin:$PATH"
13
+ fi
14
+ fi
15
+ unset __conda_setup
16
+ # <<< conda initialize <<<
17
+
18
+ conda activate sdscripts
19
+
20
+ NAME="by_randitawu-v2s400"
21
+ TRAINING_DIR="/home/kade/datasets/by_randitawu"
22
+ OUTPUT_DIR="/home/kade/output_dir"
23
+
24
+ # alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
25
+ # --min_snr_gamma=1
26
+ args=(
27
+ # Keep Tokens
28
+ --keep_tokens=1
29
+ --keep_tokens_separator="|||"
30
+ # Model
31
+ --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
32
+ # Output, logging
33
+ --output_dir="$OUTPUT_DIR/$NAME"
34
+ --output_name="$NAME"
35
+ --log_prefix="$NAME-"
36
+ --log_with=tensorboard
37
+ --logging_dir="$OUTPUT_DIR/logs"
38
+ --seed=1728871242
39
+ # Dataset
40
+ --train_data_dir="$TRAINING_DIR"
41
+ --dataset_repeats=1
42
+ --resolution="1024,1024"
43
+ --enable_bucket
44
+ --bucket_reso_steps=32
45
+ --min_bucket_reso=256
46
+ --max_bucket_reso=2048
47
+ --flip_aug
48
+ --shuffle_caption
49
+ --cache_latents
50
+ --cache_latents_to_disk
51
+ --max_data_loader_n_workers=8
52
+ --persistent_data_loader_workers
53
+ # Network config
54
+ --network_dim=8
55
+ --network_alpha=4
56
+ --network_module="lycoris.kohya"
57
+ --network_args
58
+ "preset=full"
59
+ "conv_dim=256"
60
+ "conv_alpha=4"
61
+ "rank_dropout=0"
62
+ "module_dropout=0"
63
+ "use_tucker=False"
64
+ "use_scalar=False"
65
+ "rank_dropout_scale=False"
66
+ "algo=locon"
67
+ "dora_wd=False"
68
+ "train_norm=False"
69
+ --network_dropout=0
70
+ # Optimizer config
71
+ --optimizer_type=FCompass
72
+ --train_batch_size=8
73
+ --gradient_accumulation_steps=6
74
+ --max_grad_norm=1
75
+ --gradient_checkpointing
76
+ #--lr_warmup_steps=6
77
+ #--scale_weight_norms=1
78
+ # LR Scheduling
79
+ --max_train_steps=400
80
+ --learning_rate=0.0002
81
+ --unet_lr=0.0002
82
+ --text_encoder_lr=0.0001
83
+ --lr_scheduler="cosine"
84
+ --lr_scheduler_args="num_cycles=0.375"
85
+ # Noise
86
+ --multires_noise_iterations=12
87
+ --multires_noise_discount=0.4
88
+ #--min_snr_gamma=1
89
+ # Optimization, details
90
+ --no_half_vae
91
+ --sdpa
92
+ --mixed_precision="bf16"
93
+ # Saving
94
+ --save_model_as="safetensors"
95
+ --save_precision="fp16"
96
+ --save_every_n_steps=100
97
+ # Saving States
98
+ #--save_state
99
+ # Either resume from a saved state
100
+ #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
101
+ #--skip_until_initial_step
102
+ # Or from a checkpoint
103
+ #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
104
+ #--initial_step=120
105
+ # Sampling
106
+ --sample_every_n_steps=100
107
+ --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
108
+ --sample_sampler="euler_a"
109
+ --caption_extension=".txt"
110
+ )
111
+
112
+ cd ~/source/repos/sd-scripts
113
+ #accelerate launch --num_cpu_threads_per_process=2
114
+ python "./sdxl_train_network.py" "${args[@]}"
115
+ cd ~
116
+
training_scripts/tp-by_wolfy-nail-lokr ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains a LyCORIS LoKr network for the by_wolfy-nail style dataset on
# Pony Diffusion V6 XL using kohya-ss sd-scripts (sdxl_train_network.py).

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

# Fail fast if the env is missing instead of training in the base env.
conda activate sdscripts || exit 1

NAME="by_wolfy-nail-v3s3000"
TRAINING_DIR="/home/kade/datasets/by_wolfy-nail"
OUTPUT_DIR="/home/kade/output_dir"

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
  # ⚠️ TODO: Benchmark...
  --debiased_estimation_loss
  # ⚠️ TODO: What does this do? Does it even work?
  --max_token_length=225
  # Keep Tokens
  --keep_tokens=1
  --keep_tokens_separator="|||"
  # Model
  --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
  # Output, logging
  --output_dir="$OUTPUT_DIR/$NAME"
  --output_name="$NAME"
  --log_prefix="$NAME-"
  --log_with=tensorboard
  --logging_dir="$OUTPUT_DIR/logs"
  --seed=1728871242
  # Dataset
  --train_data_dir="$TRAINING_DIR"
  --dataset_repeats=1
  --resolution="1024,1024"
  --enable_bucket
  --bucket_reso_steps=64
  --min_bucket_reso=256
  --max_bucket_reso=2048
  --flip_aug
  --shuffle_caption
  --cache_latents
  --cache_latents_to_disk
  --max_data_loader_n_workers=8
  --persistent_data_loader_workers
  # Network config
  --network_dim=100000
  # ⚠️ TODO: Plot
  --network_alpha=64
  --network_module="lycoris.kohya"
  --network_args
    "preset=full"
    "conv_dim=100000"
    "decompose_both=False"
    "conv_alpha=64"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=False"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=lokr"
    "bypass_mode=False"
    "factor=32"
    "use_cp=True"
    "dora_wd=True"
    "train_norm=False"
  --network_dropout=0
  # Optimizer config
  --optimizer_type=FCompass
  --train_batch_size=8
  --gradient_accumulation_steps=6
  --max_grad_norm=1
  --gradient_checkpointing
  --lr_warmup_steps=0
  #--scale_weight_norms=1
  # LR Scheduling
  # NOTE(review): NAME says "s3000" but this trains for 4096 steps —
  # the two disagree; confirm which is intended.
  --max_train_steps=4096
  --learning_rate=0.0005
  --unet_lr=0.0002
  --text_encoder_lr=0.0001
  --lr_scheduler="cosine"
  --lr_scheduler_args="num_cycles=0.375"
  # Noise
  --multires_noise_iterations=12
  --multires_noise_discount=0.4
  #--min_snr_gamma=1
  # Optimization, details
  --no_half_vae
  --sdpa
  --mixed_precision="bf16"
  # Saving
  --save_model_as="safetensors"
  --save_precision="fp16"
  --save_every_n_steps=100
  # Saving States
  #--save_state
  # Either resume from a saved state
  #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
  #--skip_until_initial_step
  # Or from a checkpoint
  #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
  #--initial_step=120
  # Sampling
  --sample_every_n_steps=100
  --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
  --sample_sampler="euler_a"
  --caption_extension=".txt"
)

# Abort if the checkout is missing; otherwise python would run from the
# previous working directory and fail (or run the wrong script).
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-curse_of_the_worgen ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains a LyCORIS LoKr network for the curse_of_the_worgen dataset on
# Pony Diffusion V6 XL using kohya-ss sd-scripts (sdxl_train_network.py).

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

# Fail fast if the env is missing instead of training in the base env.
conda activate sdscripts || exit 1

NAME="cotw-v1s400"
TRAINING_DIR="/home/kade/datasets/curse_of_the_worgen"
OUTPUT_DIR="/home/kade/output_dir"

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
  # Model
  --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
  # Keep Tokens
  --keep_tokens=1
  --keep_tokens_separator="|||"
  # Output, logging
  --output_dir="$OUTPUT_DIR/$NAME"
  --output_name="$NAME"
  --log_prefix="$NAME-"
  --log_with=tensorboard
  --logging_dir="$OUTPUT_DIR/logs"
  --seed=1728871242
  # Dataset
  --train_data_dir="$TRAINING_DIR"
  --dataset_repeats=1
  --resolution="1024,1024"
  --enable_bucket
  --bucket_reso_steps=32
  --min_bucket_reso=256
  --max_bucket_reso=2048
  --flip_aug
  --shuffle_caption
  --cache_latents
  --cache_latents_to_disk
  --max_data_loader_n_workers=8
  --persistent_data_loader_workers
  # Network config
  --network_dim=8
  --network_alpha=4
  --network_module="lycoris.kohya"
  --network_args
    "preset=full"
    "conv_dim=64"
    "conv_alpha=2"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=False"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=lokr"
    "dora_wd=True"
    "train_norm=False"
  --network_dropout=0
  # Optimizer config
  --optimizer_type=FCompass
  --train_batch_size=12
  --gradient_accumulation_steps=4
  --max_grad_norm=1
  --gradient_checkpointing
  #--lr_warmup_steps=6
  #--scale_weight_norms=1
  # LR Scheduling
  --max_train_steps=400
  --learning_rate=0.0002
  --unet_lr=0.0002
  --text_encoder_lr=0.0001
  --lr_scheduler="cosine"
  --lr_scheduler_args="num_cycles=0.375"
  # Noise
  #--multires_noise_iterations=12
  #--multires_noise_discount=0.4
  #--min_snr_gamma=1
  # Optimization, details
  --no_half_vae
  --sdpa
  --mixed_precision="bf16"
  # Saving
  --save_model_as="safetensors"
  --save_precision="fp16"
  --save_every_n_steps=100
  # Saving States
  #--save_state
  # Either resume from a saved state
  #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
  #--skip_until_initial_step
  # Or from a checkpoint
  #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
  #--initial_step=120
  # Sampling
  --sample_every_n_steps=100
  --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
  --sample_sampler="euler_a"
  --caption_extension=".txt"
)

# Abort if the checkout is missing; otherwise python would run from the
# previous working directory and fail (or run the wrong script).
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-curse_of_the_worgen_normal ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains a LyCORIS LoCon network (v2, "normal" config) for the
# curse_of_the_worgen dataset on Pony Diffusion V6 XL via sd-scripts.

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

# Fail fast if the env is missing instead of training in the base env.
conda activate sdscripts || exit 1

NAME="cotw-v2s400"
TRAINING_DIR="/home/kade/datasets/curse_of_the_worgen"
OUTPUT_DIR="/home/kade/output_dir"

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
  # Keep Tokens
  --keep_tokens=1
  --keep_tokens_separator="|||"
  # Model
  --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
  # Output, logging
  --output_dir="$OUTPUT_DIR/$NAME"
  --output_name="$NAME"
  --log_prefix="$NAME-"
  --log_with=tensorboard
  --logging_dir="$OUTPUT_DIR/logs"
  --seed=1728871242
  # Dataset
  --train_data_dir="$TRAINING_DIR"
  --dataset_repeats=1
  --resolution="1024,1024"
  --enable_bucket
  --bucket_reso_steps=32
  --min_bucket_reso=256
  --max_bucket_reso=2048
  --flip_aug
  --shuffle_caption
  --cache_latents
  --cache_latents_to_disk
  --max_data_loader_n_workers=8
  --persistent_data_loader_workers
  # Network config
  --network_dim=8
  --network_alpha=4
  --network_module="lycoris.kohya"
  --network_args
    "preset=full"
    "conv_dim=256"
    "conv_alpha=4"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=False"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=locon"
    "dora_wd=False"
    "train_norm=False"
  --network_dropout=0
  # Optimizer config
  --optimizer_type=FCompass
  --train_batch_size=8
  --gradient_accumulation_steps=6
  --max_grad_norm=1
  --gradient_checkpointing
  #--lr_warmup_steps=6
  #--scale_weight_norms=1
  # LR Scheduling
  --max_train_steps=400
  --learning_rate=0.0002
  --unet_lr=0.0002
  --text_encoder_lr=0.0001
  --lr_scheduler="cosine"
  --lr_scheduler_args="num_cycles=0.375"
  # Noise
  --multires_noise_iterations=12
  --multires_noise_discount=0.4
  #--min_snr_gamma=1
  # Optimization, details
  --no_half_vae
  --sdpa
  --mixed_precision="bf16"
  # Saving
  --save_model_as="safetensors"
  --save_precision="fp16"
  --save_every_n_steps=100
  # Saving States
  #--save_state
  # Either resume from a saved state
  #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
  #--skip_until_initial_step
  # Or from a checkpoint
  #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
  #--initial_step=120
  # Sampling
  --sample_every_n_steps=100
  --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
  --sample_sampler="euler_a"
  --caption_extension=".txt"
)

# Abort if the checkout is missing; otherwise python would run from the
# previous working directory and fail (or run the wrong script).
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-maliketh ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains a LyCORIS LoKr network for the maliketh dataset on
# Pony Diffusion V6 XL using kohya-ss sd-scripts (sdxl_train_network.py).
# Step count is parsed from the trailing digits of NAME.

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

# Fail fast if the env is missing instead of training in the base env.
conda activate sdscripts || exit 1

NAME="maliketh-v2s2000"
TRAINING_DIR="/home/kade/datasets/maliketh"
OUTPUT_DIR="/home/kade/output_dir"

# Extract the number of steps from the trailing digits of NAME.
STEPS=$(echo "$NAME" | grep -oE '[0-9]+$')

# If no number is found at the end of NAME, set a default value.
if [ -z "$STEPS" ]; then
    STEPS=4096
    echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
else
    echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
fi

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
  # ⚠️ TODO: Benchmark...
  --debiased_estimation_loss
  # ⚠️ TODO: What does this do? Does it even work?
  --max_token_length=225
  # Keep Tokens
  --keep_tokens=1
  --keep_tokens_separator="|||"
  # Model
  --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
  # Output, logging
  --output_dir="$OUTPUT_DIR/$NAME"
  --output_name="$NAME"
  --log_prefix="$NAME-"
  --log_with=tensorboard
  --logging_dir="$OUTPUT_DIR/logs"
  --seed=1728871242
  # Dataset
  --train_data_dir="$TRAINING_DIR"
  --dataset_repeats=1
  --resolution="1024,1024"
  --enable_bucket
  --bucket_reso_steps=64
  --min_bucket_reso=256
  --max_bucket_reso=2048
  --flip_aug
  --shuffle_caption
  --cache_latents
  --cache_latents_to_disk
  --max_data_loader_n_workers=8
  --persistent_data_loader_workers
  # Network config
  --network_dim=100000
  # ⚠️ TODO: Plot
  --network_alpha=0.0625
  --network_module="lycoris.kohya"
  --network_args
    "preset=full"
    "conv_dim=100000"
    "decompose_both=False"
    "conv_alpha=1"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=True"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=lokr"
    "bypass_mode=False"
    "factor=16"
    "dora_wd=True"
    "train_norm=False"
  --network_dropout=0
  # Optimizer config
  --optimizer_type=ClybW
  --train_batch_size=14
  #--gradient_accumulation_steps=1
  --max_grad_norm=1
  --gradient_checkpointing
  #--scale_weight_norms=1
  # LR Scheduling
  --max_train_steps=$STEPS
  --lr_warmup_steps=0
  --learning_rate=0.0001
  --unet_lr=0.0002
  --text_encoder_lr=0.0001
  --lr_scheduler="cosine"
  --lr_scheduler_args="num_cycles=0.375"
  # Noise
  --multires_noise_iterations=12
  --multires_noise_discount=0.4
  #--min_snr_gamma=1
  # Optimization, details
  --no_half_vae
  --sdpa
  --mixed_precision="bf16"
  # Saving
  --save_model_as="safetensors"
  --save_precision="fp16"
  --save_every_n_steps=100
  # Saving States
  #--save_state
  # Either resume from a saved state
  #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
  #--skip_until_initial_step
  # Or from a checkpoint
  #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
  #--initial_step=120
  # Sampling
  # NOTE(review): sampling every single step is very slow — looks like a
  # debug leftover; confirm before a long run.
  --sample_every_n_steps=1
  --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
  --sample_sampler="euler_a"
  --sample_at_first
  --caption_extension=".txt"
)

# Abort if the checkout is missing; otherwise python would run from the
# previous working directory and fail (or run the wrong script).
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-maliketh-highalpha ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains a LyCORIS LoKr network for the maliketh dataset (v3, high-alpha
# variant: network_alpha=64 / conv_alpha=64) on Pony Diffusion V6 XL.
# Step count is parsed from the trailing digits of NAME.

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

# Fail fast if the env is missing instead of training in the base env.
conda activate sdscripts || exit 1

NAME="maliketh-v3s2000"
TRAINING_DIR="/home/kade/datasets/maliketh"
OUTPUT_DIR="/home/kade/output_dir"

# Extract the number of steps from the trailing digits of NAME.
STEPS=$(echo "$NAME" | grep -oE '[0-9]+$')

# If no number is found at the end of NAME, set a default value.
if [ -z "$STEPS" ]; then
    STEPS=4096
    echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
else
    echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
fi

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
  # ⚠️ TODO: Benchmark...
  --debiased_estimation_loss
  # ⚠️ TODO: What does this do? Does it even work?
  --max_token_length=225
  # Keep Tokens
  --keep_tokens=1
  --keep_tokens_separator="|||"
  # Model
  --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
  # Output, logging
  --output_dir="$OUTPUT_DIR/$NAME"
  --output_name="$NAME"
  --log_prefix="$NAME-"
  --log_with=tensorboard
  --logging_dir="$OUTPUT_DIR/logs"
  --seed=1728871242
  # Dataset
  --train_data_dir="$TRAINING_DIR"
  --dataset_repeats=1
  --resolution="1024,1024"
  --enable_bucket
  --bucket_reso_steps=64
  --min_bucket_reso=256
  --max_bucket_reso=2048
  --flip_aug
  --shuffle_caption
  --cache_latents
  --cache_latents_to_disk
  --max_data_loader_n_workers=8
  --persistent_data_loader_workers
  # Network config
  --network_dim=100000
  # ⚠️ TODO: Plot
  --network_alpha=64
  --network_module="lycoris.kohya"
  --network_args
    "preset=full"
    "conv_dim=100000"
    "decompose_both=False"
    "conv_alpha=64"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=True"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=lokr"
    "bypass_mode=False"
    "factor=16"
    "dora_wd=True"
    "train_norm=False"
  --network_dropout=0
  # Optimizer config
  --optimizer_type=ClybW
  --train_batch_size=14
  #--gradient_accumulation_steps=1
  --max_grad_norm=1
  --gradient_checkpointing
  #--scale_weight_norms=1
  # LR Scheduling
  --max_train_steps=$STEPS
  --lr_warmup_steps=0
  --learning_rate=0.0001
  --unet_lr=0.0002
  --text_encoder_lr=0.0001
  --lr_scheduler="cosine"
  --lr_scheduler_args="num_cycles=0.375"
  # Noise
  --multires_noise_iterations=12
  --multires_noise_discount=0.4
  #--min_snr_gamma=1
  # Optimization, details
  --no_half_vae
  --sdpa
  --mixed_precision="bf16"
  # Saving
  --save_model_as="safetensors"
  --save_precision="fp16"
  --save_every_n_steps=100
  # Saving States
  #--save_state
  # Either resume from a saved state
  #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
  #--skip_until_initial_step
  # Or from a checkpoint
  #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
  #--initial_step=120
  # Sampling
  # NOTE(review): sampling every single step is very slow — looks like a
  # debug leftover; confirm before a long run.
  --sample_every_n_steps=1
  --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
  --sample_sampler="euler_a"
  --sample_at_first
  --caption_extension=".txt"
)

# Abort if the checkout is missing; otherwise python would run from the
# previous working directory and fail (or run the wrong script).
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-nextgen ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains a LyCORIS LoKr network for the by_darkgem style dataset on
# Pony Diffusion V6 XL using kohya-ss sd-scripts (sdxl_train_network.py).
# Step count is parsed from the trailing digits of NAME.

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

# Fail fast if the env is missing instead of training in the base env.
conda activate sdscripts || exit 1

NAME="by_darkgem-v5s2400"
TRAINING_DIR="/home/kade/datasets/by_darkgem"
OUTPUT_DIR="/home/kade/output_dir"

# Extract the number of steps from the trailing digits of NAME.
STEPS=$(echo "$NAME" | grep -oE '[0-9]+$')

# If no number is found at the end of NAME, set a default value.
if [ -z "$STEPS" ]; then
    STEPS=4096
    echo "No step count found in NAME. Using default value of \e[35m$STEPS\e[0m"
else
    echo "Extracted \e[35m$STEPS\e[0m steps from NAME"
fi

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
  # ⚠️ TODO: Benchmark...
  --debiased_estimation_loss
  # ⚠️ TODO: What does this do? Does it even work?
  --max_token_length=225
  # Keep Tokens
  --keep_tokens=1
  --keep_tokens_separator="|||"
  # Model
  --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
  # Output, logging
  --output_dir="$OUTPUT_DIR/$NAME"
  --output_name="$NAME"
  --log_prefix="$NAME-"
  --log_with=tensorboard
  --logging_dir="$OUTPUT_DIR/logs"
  --seed=1728871242
  # Dataset
  --train_data_dir="$TRAINING_DIR"
  --dataset_repeats=1
  --resolution="1024,1024"
  --enable_bucket
  --bucket_reso_steps=64
  --min_bucket_reso=256
  --max_bucket_reso=2048
  --flip_aug
  --shuffle_caption
  --cache_latents
  --cache_latents_to_disk
  --max_data_loader_n_workers=8
  --persistent_data_loader_workers
  # Network config
  --network_dim=100000
  # ⚠️ TODO: Plot
  --network_alpha=64
  --network_module="lycoris.kohya"
  --network_args
    "preset=full"
    "conv_dim=100000"
    "decompose_both=False"
    "conv_alpha=64"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=True"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=lokr"
    "bypass_mode=False"
    "factor=16"
    "dora_wd=True"
    "train_norm=False"
  --network_dropout=0
  # Optimizer config
  --optimizer_type=FCompass
  --train_batch_size=8
  --gradient_accumulation_steps=6
  --max_grad_norm=1
  --gradient_checkpointing
  #--scale_weight_norms=1
  # LR Scheduling
  --max_train_steps=$STEPS
  --lr_warmup_steps=0
  --learning_rate=0.0002
  --unet_lr=0.0002
  --text_encoder_lr=0.0001
  --lr_scheduler="cosine"
  --lr_scheduler_args="num_cycles=0.375"
  # Noise
  --multires_noise_iterations=12
  --multires_noise_discount=0.4
  #--min_snr_gamma=1
  # Optimization, details
  --no_half_vae
  --sdpa
  --mixed_precision="bf16"
  # Saving
  --save_model_as="safetensors"
  --save_precision="fp16"
  --save_every_n_steps=100
  # Saving States
  #--save_state
  # Either resume from a saved state
  #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
  #--skip_until_initial_step
  # Or from a checkpoint
  #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
  #--initial_step=120
  # Sampling
  --sample_every_n_steps=100
  --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
  --sample_sampler="euler_a"
  --sample_at_first
  --caption_extension=".txt"
)

# Abort if the checkout is missing; otherwise python would run from the
# previous working directory and fail (or run the wrong script).
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-nitw ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains a LyCORIS LoKr network for the night_in_the_woods dataset on
# Pony Diffusion V6 XL using kohya-ss sd-scripts (sdxl_train_network.py).

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

# Fail fast if the env is missing instead of training in the base env.
conda activate sdscripts || exit 1

NAME="nitw-v1s1200"
TRAINING_DIR="/home/kade/datasets/night_in_the_woods"
OUTPUT_DIR="/home/kade/output_dir"

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
  # ⚠️ TODO: Benchmark...
  --debiased_estimation_loss
  # ⚠️ TODO: What does this do? Does it even work?
  --max_token_length=225
  # Keep Tokens
  --keep_tokens=1
  --keep_tokens_separator="|||"
  # Model
  --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
  # Output, logging
  --output_dir="$OUTPUT_DIR/$NAME"
  --output_name="$NAME"
  --log_prefix="$NAME-"
  --log_with=tensorboard
  --logging_dir="$OUTPUT_DIR/logs"
  --seed=1728871242
  # Dataset
  --train_data_dir="$TRAINING_DIR"
  --dataset_repeats=1
  --resolution="1024,1024"
  --enable_bucket
  --bucket_reso_steps=64
  --min_bucket_reso=256
  --max_bucket_reso=2048
  --flip_aug
  --shuffle_caption
  --cache_latents
  --cache_latents_to_disk
  --max_data_loader_n_workers=8
  --persistent_data_loader_workers
  # Network config
  --network_dim=100000
  # ⚠️ TODO: Plot
  --network_alpha=64
  --network_module="lycoris.kohya"
  --network_args
    "preset=full"
    "conv_dim=100000"
    "decompose_both=False"
    "conv_alpha=64"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=False"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=lokr"
    "bypass_mode=False"
    "factor=32"
    "use_cp=True"
    "dora_wd=True"
    "train_norm=False"
  --network_dropout=0
  # Optimizer config
  --optimizer_type=FCompass
  --train_batch_size=8
  --gradient_accumulation_steps=6
  --max_grad_norm=1
  --gradient_checkpointing
  --lr_warmup_steps=0
  #--scale_weight_norms=1
  # LR Scheduling
  --max_train_steps=1200
  --learning_rate=0.0005
  --unet_lr=0.0002
  --text_encoder_lr=0.0001
  --lr_scheduler="cosine"
  --lr_scheduler_args="num_cycles=0.375"
  # Noise
  --multires_noise_iterations=12
  --multires_noise_discount=0.4
  #--min_snr_gamma=1
  # Optimization, details
  --no_half_vae
  --sdpa
  --mixed_precision="bf16"
  # Saving
  --save_model_as="safetensors"
  --save_precision="fp16"
  --save_every_n_steps=100
  # Saving States
  #--save_state
  # Either resume from a saved state
  #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
  #--skip_until_initial_step
  # Or from a checkpoint
  #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
  #--initial_step=120
  # Sampling
  --sample_every_n_steps=20
  --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
  --sample_sampler="euler_a"
  --caption_extension=".txt"
)

# Abort if the checkout is missing; otherwise python would run from the
# previous working directory and fail (or run the wrong script).
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
+
training_scripts/tp-oldie ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains a LyCORIS LoCon network for the bigfur dataset on
# Pony Diffusion V6 XL using kohya-ss sd-scripts (sdxl_train_network.py).
#
# NOTE(review): unlike the sibling scripts, this one has no 'conda init'
# block, so it assumes 'conda' is already a shell function when run —
# confirm, or source the init block like the other scripts.

# Fail fast if the env is missing instead of training in the base env.
conda activate sdscripts || exit 1

NAME="bigfur-v1s2000"
TRAINING_DIR="/home/kade/training/training_dir"
OUTPUT_DIR="/home/kade/output_dir"

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
  --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
  # Output, logging
  --output_dir="$OUTPUT_DIR/$NAME"
  --output_name="$NAME"
  --log_prefix="$NAME-"
  --log_with=tensorboard
  --logging_dir="$OUTPUT_DIR/logs"
  --seed=1728871242
  # Dataset
  --train_data_dir="$TRAINING_DIR"
  --dataset_repeats=1
  --resolution="1024,1024"
  --enable_bucket
  --bucket_reso_steps=32
  --min_bucket_reso=256
  --max_bucket_reso=2048
  --flip_aug
  --shuffle_caption
  --cache_latents
  --cache_latents_to_disk
  --max_data_loader_n_workers=8
  --persistent_data_loader_workers
  # Network config
  --network_dim=8
  --network_alpha=4
  --network_module="lycoris.kohya"
  --network_args
    "preset=full"
    "conv_dim=256"
    "conv_alpha=4"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=False"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=locon"
    "dora_wd=False"
    "train_norm=False"
  --network_dropout=0
  # Optimizer config
  --optimizer_type=ClybW
  --train_batch_size=8
  --gradient_accumulation_steps=6
  --max_grad_norm=1
  --gradient_checkpointing
  #--lr_warmup_steps=6
  #--scale_weight_norms=1
  # LR Scheduling
  --max_train_steps=2000
  --learning_rate=0.0002
  --unet_lr=0.0002
  --text_encoder_lr=0.0001
  --lr_scheduler="cosine"
  --lr_scheduler_args="num_cycles=0.375"
  # Noise
  --multires_noise_iterations=12
  --multires_noise_discount=0.4
  #--min_snr_gamma=1
  # Optimization, details
  --no_half_vae
  --sdpa
  --mixed_precision="bf16"
  # Saving
  --save_model_as="safetensors"
  --save_precision="fp16"
  --save_every_n_steps=100
  # Saving States
  #--save_state
  # Either resume from a saved state
  #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
  #--skip_until_initial_step
  # Or from a checkpoint
  #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
  #--initial_step=120
  # Sampling
  --sample_every_n_steps=100
  --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
  --sample_sampler="euler_a"
  --caption_extension=".txt"
)

# Abort if the checkout is missing; otherwise python would run from the
# previous working directory and fail (or run the wrong script).
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-promo_animals ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains an SDXL LoCon (LyCORIS) network on the promo_animals dataset on top
# of Pony Diffusion V6 XL using kohya-ss sd-scripts (sdxl_train_network.py).
# Requires the 'sdscripts' conda env and the checkpoint/dataset paths below.

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

# NOTE(review): NAME suffix says "s400" but --max_train_steps below is 600 —
# confirm which step count is intended.
NAME="promo_animals-v1s400"
TRAINING_DIR="/home/kade/datasets/promo_animals/bullseye"
OUTPUT_DIR="/home/kade/output_dir"

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
    --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
    # Output, logging
    --output_dir="$OUTPUT_DIR/$NAME"
    --output_name="$NAME"
    --log_prefix="$NAME-"
    --log_with=tensorboard
    --logging_dir="$OUTPUT_DIR/logs"
    --seed=1728871242
    # Dataset
    --train_data_dir="$TRAINING_DIR"
    --dataset_repeats=1
    --resolution="1024,1024"
    --enable_bucket
    --bucket_reso_steps=32
    --min_bucket_reso=256
    --max_bucket_reso=2048
    --flip_aug
    --shuffle_caption
    --cache_latents
    --cache_latents_to_disk
    --max_data_loader_n_workers=8
    --persistent_data_loader_workers
    # Network config
    --network_dim=8
    --network_alpha=4
    --network_module="lycoris.kohya"
    --network_args
    "preset=full"
    "conv_dim=256"
    "conv_alpha=4"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=False"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=locon"
    "dora_wd=False"
    "train_norm=False"
    --network_dropout=0
    # Optimizer config
    --optimizer_type=FCompass
    --train_batch_size=12
    --gradient_accumulation_steps=4
    --max_grad_norm=1
    --gradient_checkpointing
    #--lr_warmup_steps=6
    #--scale_weight_norms=1
    # LR Scheduling
    --max_train_steps=600
    --learning_rate=0.0002
    --unet_lr=0.0002
    --text_encoder_lr=0.0001
    --lr_scheduler="cosine"
    --lr_scheduler_args="num_cycles=0.375"
    # Noise
    --multires_noise_iterations=12
    --multires_noise_discount=0.4
    #--min_snr_gamma=1
    # Optimization, details
    --no_half_vae
    --sdpa
    --mixed_precision="bf16"
    # Saving
    --save_model_as="safetensors"
    --save_precision="fp16"
    --save_every_n_steps=100
    # Saving States
    #--save_state
    # Either resume from a saved state
    #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
    #--skip_until_initial_step
    # Or from a checkpoint
    #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
    #--initial_step=120
    # Sampling
    --sample_every_n_steps=100
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_sampler="euler_a"
    --caption_extension=".txt"
)

# Abort if the sd-scripts checkout is missing; otherwise python would run
# from the wrong working directory.
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-promo_animals-bullseye ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains an SDXL LoKr (LyCORIS) network with DoRA weight decomposition on the
# promo_animals/bullseye dataset, base model Pony Diffusion V6 XL, via
# kohya-ss sd-scripts (sdxl_train_network.py).

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

NAME="bullseye-v5s400"
TRAINING_DIR="/home/kade/datasets/promo_animals/bullseye"
OUTPUT_DIR="/home/kade/output_dir"

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
    # Model
    --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
    # Keep Tokens
    --keep_tokens=1
    --keep_tokens_separator="|||"
    # Output, logging
    --output_dir="$OUTPUT_DIR/$NAME"
    --output_name="$NAME"
    --log_prefix="$NAME-"
    --log_with=tensorboard
    --logging_dir="$OUTPUT_DIR/logs"
    --seed=1728871242
    # Dataset
    --train_data_dir="$TRAINING_DIR"
    --dataset_repeats=1
    --resolution="1024,1024"
    --enable_bucket
    --bucket_reso_steps=32
    --min_bucket_reso=256
    --max_bucket_reso=2048
    --flip_aug
    --shuffle_caption
    --cache_latents
    --cache_latents_to_disk
    --max_data_loader_n_workers=8
    --persistent_data_loader_workers
    # Network config
    --network_dim=8
    --network_alpha=4
    --network_module="lycoris.kohya"
    --network_args
    "preset=full"
    "conv_dim=64"
    "conv_alpha=2"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=False"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=lokr"
    "dora_wd=True"
    "train_norm=False"
    --network_dropout=0
    # Optimizer config
    --optimizer_type=FCompass
    --train_batch_size=12
    --gradient_accumulation_steps=4
    --max_grad_norm=1
    --gradient_checkpointing
    #--lr_warmup_steps=6
    #--scale_weight_norms=1
    # LR Scheduling
    --max_train_steps=400
    --learning_rate=0.0002
    --unet_lr=0.0002
    --text_encoder_lr=0.0001
    --lr_scheduler="cosine"
    --lr_scheduler_args="num_cycles=0.375"
    # Noise
    #--multires_noise_iterations=12
    #--multires_noise_discount=0.4
    #--min_snr_gamma=1
    # Optimization, details
    --no_half_vae
    --sdpa
    --mixed_precision="bf16"
    # Saving
    --save_model_as="safetensors"
    --save_precision="fp16"
    --save_every_n_steps=100
    # Saving States
    #--save_state
    # Either resume from a saved state
    #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
    #--skip_until_initial_step
    # Or from a checkpoint
    #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
    #--initial_step=120
    # Sampling
    --sample_every_n_steps=100
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_sampler="euler_a"
    --caption_extension=".txt"
)

# Abort if the sd-scripts checkout is missing; otherwise python would run
# from the wrong working directory.
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
+
training_scripts/tp-promo_animals-bullseye-clybw ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains an SDXL LoCon (LyCORIS) network on the promo_animals/bullseye
# dataset with the ClybW optimizer, base model Pony Diffusion V6 XL, via
# kohya-ss sd-scripts (sdxl_train_network.py).

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

# NOTE(review): NAME suffix says "s400" but --max_train_steps below is 600 —
# confirm which step count is intended.
NAME="bullseye-v2s400"
TRAINING_DIR="/home/kade/datasets/promo_animals/bullseye"
OUTPUT_DIR="/home/kade/output_dir"

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
    # Model
    --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
    # Keep Tokens
    --keep_tokens=1
    --keep_tokens_separator="|||"
    # Output, logging
    --output_dir="$OUTPUT_DIR/$NAME"
    --output_name="$NAME"
    --log_prefix="$NAME-"
    --log_with=tensorboard
    --logging_dir="$OUTPUT_DIR/logs"
    --seed=1728871242
    # Dataset
    --train_data_dir="$TRAINING_DIR"
    --dataset_repeats=1
    --resolution="1024,1024"
    --enable_bucket
    --bucket_reso_steps=32
    --min_bucket_reso=256
    --max_bucket_reso=2048
    --flip_aug
    --shuffle_caption
    --cache_latents
    --cache_latents_to_disk
    --max_data_loader_n_workers=8
    --persistent_data_loader_workers
    # Network config
    --network_dim=8
    --network_alpha=4
    --network_module="lycoris.kohya"
    --network_args
    "preset=full"
    "conv_dim=256"
    "conv_alpha=4"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=False"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=locon"
    "dora_wd=False"
    "train_norm=False"
    --network_dropout=0
    # Optimizer config
    --optimizer_type=ClybW
    --train_batch_size=12
    --gradient_accumulation_steps=4
    --max_grad_norm=1
    --gradient_checkpointing
    #--lr_warmup_steps=6
    #--scale_weight_norms=1
    # LR Scheduling
    --max_train_steps=600
    --learning_rate=0.0002
    --unet_lr=0.0002
    --text_encoder_lr=0.0001
    --lr_scheduler="cosine"
    --lr_scheduler_args="num_cycles=0.375"
    # Noise
    --multires_noise_iterations=12
    --multires_noise_discount=0.4
    #--min_snr_gamma=1
    # Optimization, details
    --no_half_vae
    --sdpa
    --mixed_precision="bf16"
    # Saving
    --save_model_as="safetensors"
    --save_precision="fp16"
    --save_every_n_steps=100
    # Saving States
    #--save_state
    # Either resume from a saved state
    #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
    #--skip_until_initial_step
    # Or from a checkpoint
    #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
    #--initial_step=120
    # Sampling
    --sample_every_n_steps=100
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_sampler="euler_a"
    --caption_extension=".txt"
)

# Abort if the sd-scripts checkout is missing; otherwise python would run
# from the wrong working directory.
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp-try_this-lokr ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Trains an SDXL LoKr (LyCORIS) network with DoRA on the by_hamgas dataset,
# base model Pony Diffusion V6 XL, via kohya-ss sd-scripts.
# The step count is derived from the trailing digits of NAME.

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

NAME="by_hamgas-v3s1200"
TRAINING_DIR="/home/kade/datasets/by_hamgas"
OUTPUT_DIR="/home/kade/output_dir"

# Extract the number of steps from the trailing digits of NAME
# (e.g. "…s1200" -> 1200).
STEPS=$(printf '%s' "$NAME" | grep -oE '[0-9]+$')

# If no number is found at the end of NAME, fall back to a default.
# printf interprets the \e[35m…\e[0m ANSI color escapes portably.
if [ -z "$STEPS" ]; then
    STEPS=4096
    printf 'No step count found in NAME. Using default value of \e[35m%s\e[0m\n' "$STEPS"
else
    printf 'Extracted \e[35m%s\e[0m steps from NAME\n' "$STEPS"
fi

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
    # ⚠️ TODO: Benchmark...
    --debiased_estimation_loss
    # ⚠️ TODO: What does this do? Does it even work?
    --max_token_length=225
    # Keep Tokens
    --keep_tokens=1
    --keep_tokens_separator="|||"
    # Model
    --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
    # Output, logging
    --output_dir="$OUTPUT_DIR/$NAME"
    --output_name="$NAME"
    --log_prefix="$NAME-"
    --log_with=tensorboard
    --logging_dir="$OUTPUT_DIR/logs"
    --seed=1728871242
    # Dataset
    --train_data_dir="$TRAINING_DIR"
    --dataset_repeats=1
    --resolution="1024,1024"
    --enable_bucket
    --bucket_reso_steps=64
    --min_bucket_reso=256
    --max_bucket_reso=2048
    --flip_aug
    --shuffle_caption
    --cache_latents
    --cache_latents_to_disk
    --max_data_loader_n_workers=8
    --persistent_data_loader_workers
    # Network config
    # NOTE(review): dim=100000 looks like an "effectively unbounded" sentinel
    # so LoKr factor= controls the rank — confirm against the LyCORIS docs.
    --network_dim=100000
    # ⚠️ TODO: Plot
    --network_alpha=64
    --network_module="lycoris.kohya"
    --network_args
    "preset=full"
    "conv_dim=100000"
    "decompose_both=False"
    "conv_alpha=64"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=False"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=lokr"
    "bypass_mode=False"
    "factor=32"
    "use_cp=True"
    "dora_wd=True"
    "train_norm=False"
    --network_dropout=0
    # Optimizer config
    --optimizer_type=FCompass
    --train_batch_size=8
    --gradient_accumulation_steps=6
    --max_grad_norm=1
    --gradient_checkpointing
    --lr_warmup_steps=0
    #--scale_weight_norms=1
    # LR Scheduling
    --max_train_steps="$STEPS"
    --learning_rate=0.0005
    --unet_lr=0.0002
    --text_encoder_lr=0.0001
    --lr_scheduler="cosine"
    --lr_scheduler_args="num_cycles=0.375"
    # Noise
    --multires_noise_iterations=12
    --multires_noise_discount=0.4
    #--min_snr_gamma=1
    # Optimization, details
    --no_half_vae
    --sdpa
    --mixed_precision="bf16"
    # Saving
    --save_model_as="safetensors"
    --save_precision="fp16"
    --save_every_n_steps=100
    # Saving States
    #--save_state
    # Either resume from a saved state
    #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
    #--skip_until_initial_step
    # Or from a checkpoint
    #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
    #--initial_step=120
    # Sampling
    --sample_every_n_steps=100
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_sampler="euler_a"
    --caption_extension=".txt"
)

# Abort if the sd-scripts checkout is missing; otherwise python would run
# from the wrong working directory.
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
python "./sdxl_train_network.py" "${args[@]}"
cd ~
training_scripts/tp_EXPE_-by_darkgem-lokr ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env zsh
#
# Experimental: trains an SDXL LoKr (LyCORIS) network with DoRA on the
# by_darkgem dataset, base model Pony Diffusion V6 XL, via kohya-ss
# sd-scripts. NAME (and thus the step count) is taken from the script's own
# filename, so renaming the script changes the run name. Extra CLI arguments
# are forwarded to sdxl_train_network.py.

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/kade/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/home/kade/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/home/kade/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/home/kade/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

conda activate sdscripts

# Quote "$0" so a script path containing spaces still resolves.
NAME="$(basename "$0")"
TRAINING_DIR="/home/kade/datasets/by_darkgem"
OUTPUT_DIR="/home/kade/output_dir"

# Extract the number of steps from the trailing digits of NAME.
STEPS=$(printf '%s' "$NAME" | grep -oE '[0-9]+$')

# If no number is found at the end of NAME, fall back to a default.
# printf interprets the \e[35m…\e[0m ANSI color escapes portably.
if [ -z "$STEPS" ]; then
    STEPS=4096
    printf 'No step count found in %s. Using default value of \e[35m%s\e[0m\n' "$NAME" "$STEPS"
else
    printf 'Extracted \e[35m%s\e[0m steps from %s\n' "$STEPS" "$NAME"
fi

# alpha=1 @ dim=16 is the same lr than alpha=4 @ dim=256
# --min_snr_gamma=1
args=(
    # ⚠️ TODO: Benchmark...
    --debiased_estimation_loss
    # ⚠️ TODO: What does this do? Does it even work?
    --max_token_length=225
    # Keep Tokens
    --keep_tokens=1
    --keep_tokens_separator="|||"
    # Model
    --pretrained_model_name_or_path=/home/kade/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors
    # Output, logging
    --output_dir="$OUTPUT_DIR/$NAME"
    --output_name="$NAME"
    --log_prefix="$NAME-"
    --log_with=tensorboard
    --logging_dir="$OUTPUT_DIR/logs"
    --seed=1728871242
    # Dataset
    --train_data_dir="$TRAINING_DIR"
    --dataset_repeats=1
    --resolution="1024,1024"
    --enable_bucket
    --bucket_reso_steps=64
    --min_bucket_reso=256
    --max_bucket_reso=2048
    --flip_aug
    --shuffle_caption
    --cache_latents
    --cache_latents_to_disk
    --max_data_loader_n_workers=8
    --persistent_data_loader_workers
    # Network config
    # NOTE(review): dim=100000 looks like an "effectively unbounded" sentinel
    # so LoKr factor= controls the rank — confirm against the LyCORIS docs.
    --network_dim=100000
    # ⚠️ TODO: Plot
    --network_alpha=64
    --network_module="lycoris.kohya"
    --network_args
    "preset=full"
    "conv_dim=100000"
    "decompose_both=False"
    "conv_alpha=64"
    "rank_dropout=0"
    "module_dropout=0"
    "use_tucker=False"
    "use_scalar=False"
    "rank_dropout_scale=False"
    "algo=lokr"
    "bypass_mode=False"
    "factor=32"
    "use_cp=True"
    "dora_wd=True"
    "train_norm=False"
    --network_dropout=0
    # Optimizer config
    --optimizer_type=FCompass
    --train_batch_size=8
    --gradient_accumulation_steps=6
    --max_grad_norm=1
    --gradient_checkpointing
    --lr_warmup_steps=0
    #--scale_weight_norms=1
    # LR Scheduling
    --max_train_steps="$STEPS"
    --learning_rate=0.0005
    --unet_lr=0.0002
    --text_encoder_lr=0.0001
    --lr_scheduler="cosine"
    --lr_scheduler_args="num_cycles=0.375"
    # Noise
    --multires_noise_iterations=12
    --multires_noise_discount=0.4
    #--min_snr_gamma=1
    # Optimization, details
    --no_half_vae
    --sdpa
    --mixed_precision="bf16"
    # Saving
    --save_model_as="safetensors"
    --save_precision="fp16"
    --save_every_n_steps=100
    # Saving States
    #--save_state
    # Either resume from a saved state
    #--resume="$OUTPUT_DIR/wolflink-vfucks400" # Resume from saved state
    #--skip_until_initial_step
    # Or from a checkpoint
    #--network_weights="$OUTPUT_DIR/wolflink-vfucks400/wolflink-vfucks400-step00000120.safetensors" # Resume from checkpoint (not needed with state, i think)
    #--initial_step=120
    # Sampling
    --sample_every_n_steps=100
    --sample_prompts="$TRAINING_DIR/sample-prompts.txt"
    --sample_sampler="euler_a"
    --sample_at_first
    --caption_extension=".txt"
)

# Abort if the sd-scripts checkout is missing; otherwise python would run
# from the wrong working directory.
cd ~/source/repos/sd-scripts || exit 1
#accelerate launch --num_cpu_threads_per_process=2
echo "Running: python \"./sdxl_train_network.py\" \"${args[@]}\" \"$@\""
python "./sdxl_train_network.py" "${args[@]}" "$@"
cd ~