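# LoRA training config for the character kazamatsuri_fuuka, in the TOML layout
# used by LoRA Easy Training Scripts (a front end for kohya-ss sd-scripts);
# the LoraEasyCustomOptimizer scheduler referenced below comes from that project.

# Primary dataset: the curated "keep" folder, weighted highest. num_repeats is
# how many times each image is seen per epoch; keep_tokens = 1 pins the first
# tag (typically the character trigger) in place while the rest are shuffled.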
[[subsets]]
num_repeats = 4
keep_tokens = 1
caption_extension = ".txt"
shuffle_caption = true
flip_aug = false
color_aug = false
random_crop = true
is_reg = false
image_dir = "F:/Pictures/Grabber/kazamatsuri_fuuka\\keep"
[[subsets]]
num_repeats = 3
keep_tokens = 1
caption_extension = ".txt"
shuffle_caption = true
flip_aug = false
color_aug = false
random_crop = true
is_reg = false
image_dir = "F:/Pictures/Grabber/kazamatsuri_fuuka\\misc"
[sample_args]
[logging_args]
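
# Core training settings: bf16 mixed precision, SDPA attention, a fixed seed,
# and 20 epochs. clip_skip = 2 (the penultimate CLIP layer) is the usual
# choice for NAI-derived anime base models, and max_token_length = 225 extends
# captions past the default 75-token CLIP window.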
[general_args.args]
pretrained_model_name_or_path = "F:/Desktop/stable diffusion/LoRA/AnimeFullFinal.safetensors"
mixed_precision = "bf16"
seed = 23
clip_skip = 2
max_data_loader_n_workers = 1
persistent_data_loader_workers = true
max_token_length = 225
prior_loss_weight = 1.0
sdpa = true
max_train_epochs = 20
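
# Train at 768 px with a batch size of 8.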
[general_args.dataset_args]
resolution = 768
batch_size = 8
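
# Rank-16 LoRA; alpha 8 scales the learned weights by alpha/dim = 0.5. The
# timestep range 0-1000 covers the full noise schedule.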
[network_args.args]
network_dim = 16
network_alpha = 8.0
min_timestep = 0
max_timestep = 1000
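
# 8-bit AdamW with a cosine-annealing warm-restart schedule (4 cycles, 10%
# warmup) from the project's custom optimizer module. unet_lr = 5e-4 overrides
# learning_rate for the UNet; since no text_encoder_lr is set, the text
# encoder presumably falls back to learning_rate = 1e-4. min_snr_gamma = 8
# enables Min-SNR loss weighting, and scale_weight_norms caps LoRA weight
# norms at 5.0 to keep the network from burning in.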
[optimizer_args.args]
optimizer_type = "AdamW8bit"
lr_scheduler = "cosine"
learning_rate = 0.0001
max_grad_norm = 1.0
lr_scheduler_type = "LoraEasyCustomOptimizer.CustomOptimizers.CosineAnnealingWarmupRestarts"
lr_scheduler_num_cycles = 4
unet_lr = 0.0005
warmup_ratio = 0.1
min_snr_gamma = 8
scale_weight_norms = 5.0
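
# Save an fp16 safetensors checkpoint every 2 epochs (10 snapshots over the
# 20-epoch run). tag_occurrence and save_toml appear to be LoRA Easy Training
# Scripts extras that write a tag-frequency list and a copy of this config
# alongside the model.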
[saving_args.args]
output_dir = "F:/stable_diffusion_models_and_outputs/models/Lora/v1/Characters/kazamatsuri_fuuka"
save_precision = "fp16"
save_model_as = "safetensors"
output_name = "kazamatsuri_fuuka"
save_every_n_epochs = 2
tag_occurrence = true
save_toml = true
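
# Aspect-ratio bucketing: images are sorted into resolution buckets between
# 256 and 1024 px in 64 px steps, so mixed aspect ratios train without heavy
# cropping.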
[bucket_args.dataset_args]
enable_bucket = true
min_bucket_reso = 256
max_bucket_reso = 1024
bucket_reso_steps = 64
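
# Multi-resolution (pyramid) noise: 6 iterations with a 0.3 discount per level.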
[noise_args.args]
multires_noise_iterations = 6
multires_noise_discount = 0.3
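
# In-network regularization: 30% neuron dropout plus a 25% chance of dropping
# an entire LoRA module on a given step.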
[network_args.args.network_args]
dropout = 0.3
module_dropout = 0.25
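
# Warm-restart parameters: each cycle anneals down to a 1e-6 floor, and gamma
# scales the peak learning rate by 0.85 after every restart.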
[optimizer_args.args.lr_scheduler_args]
min_lr = 1e-6
gamma = 0.85
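
# Extra keyword arguments forwarded to AdamW8bit; the values are serialized as
# strings here and parsed back into numbers by the trainer.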
[optimizer_args.args.optimizer_args]
weight_decay = "0.1"
betas = "0.9,0.99"