[[subsets]]
num_repeats = 4
keep_tokens = 1
caption_extension = ".txt"
shuffle_caption = true
flip_aug = false
color_aug = false
random_crop = true
is_reg = false
image_dir = "F:/Desktop/stable diffusion/LoRA/lora_datasets/characters/hatsuzuki/4_swim"

[[subsets]]
num_repeats = 6
keep_tokens = 1
caption_extension = ".txt"
shuffle_caption = true
flip_aug = false
color_aug = false
random_crop = true
is_reg = false
image_dir = "F:/Desktop/stable diffusion/LoRA/lora_datasets/characters/hatsuzuki/6_main"

[[subsets]]
num_repeats = 1
keep_tokens = 0
caption_extension = ".txt"
shuffle_caption = true
flip_aug = false
color_aug = false
random_crop = true
is_reg = true
image_dir = "F:/Downloads/Compressed/test/output"

[sample_args]

[logging_args]

[general_args.args]
pretrained_model_name_or_path = "F:/Desktop/stable diffusion/LoRA/AnimeFullFinal.safetensors"
mixed_precision = "bf16"
seed = 23
clip_skip = 2
max_data_loader_n_workers = 1
persistent_data_loader_workers = true
max_token_length = 225
prior_loss_weight = 1.0
xformers = true
max_train_epochs = 20

[general_args.dataset_args]
resolution = 768
batch_size = 8

[network_args.args]
network_dim = 16
network_alpha = 8.0
min_timestep = 0
max_timestep = 1000

[optimizer_args.args]
optimizer_type = "AdamW8bit"
lr_scheduler = "cosine"
learning_rate = 0.0001
max_grad_norm = 1.0
lr_scheduler_type = "LoraEasyCustomOptimizer.CustomOptimizers.CosineAnnealingWarmupRestarts"
lr_scheduler_num_cycles = 4
unet_lr = 0.0005
warmup_ratio = 0.1
min_snr_gamma = 8
scale_weight_norms = 5.0

[saving_args.args]
output_dir = "F:/stable_diffusion_models_and_outputs/models/Lora/v1/Characters/hatsuzuki"
save_precision = "fp16"
save_model_as = "safetensors"
output_name = "hatsuzuki_with_reg"
save_every_n_epochs = 2
tag_occurrence = true
save_toml = true

[bucket_args.dataset_args]
enable_bucket = true
min_bucket_reso = 256
max_bucket_reso = 1024
bucket_reso_steps = 64

[noise_args.args]
multires_noise_iterations = 6
multires_noise_discount = 0.3

[network_args.args.network_args]
dropout = 0.3
module_dropout = 0.25

[optimizer_args.args.lr_scheduler_args]
min_lr = 1e-6
gamma = 0.85

[optimizer_args.args.optimizer_args]
weight_decay = "0.1"
betas = "0.9,0.99"