feat: upload lora model
- _config/config_file.toml +5 -5
- _config/sample_prompt.toml +1 -1
_config/config_file.toml
CHANGED
@@ -7,7 +7,7 @@ shuffle_caption = true
 lowram = true
 
 [model_arguments]
-pretrained_model_name_or_path = "cagliostrolab/animagine-xl-3.
+pretrained_model_name_or_path = "cagliostrolab/animagine-xl-3.1"
 vae = "/content/vae/sdxl_vae.safetensors"
 
 [dataset_arguments]
@@ -22,8 +22,8 @@ token_warmup_min = 1
 token_warmup_step = 0
 
 [training_arguments]
-output_dir = "/content/LoRA/output/
-output_name = "
+output_dir = "/content/LoRA/output/kakure"
+output_name = "kakure"
 save_precision = "fp16"
 save_every_n_epochs = 2
 train_batch_size = 4
@@ -31,7 +31,7 @@ max_token_length = 225
 mem_eff_attn = false
 sdpa = true
 xformers = false
-max_train_epochs =
+max_train_epochs = 20
 max_data_loader_n_workers = 8
 persistent_data_loader_workers = true
 gradient_checkpointing = true
@@ -41,7 +41,7 @@ mixed_precision = "fp16"
 [logging_arguments]
 log_with = "tensorboard"
 logging_dir = "/content/LoRA/logs"
-log_prefix = "
+log_prefix = "kakure"
 
 [sample_prompt_arguments]
 sample_every_n_epochs = 1
_config/sample_prompt.toml
CHANGED
@@ -5,5 +5,5 @@ height = 1024
 scale = 12
 sample_steps = 28
 [[prompt.subset]]
-prompt = "masterpiece,best quality,
+prompt = "masterpiece,best quality,kakure,1girl"
 