[[subsets]]
num_repeats = 25
caption_extension = ".txt"
shuffle_caption = true
flip_aug = false
is_reg = false
image_dir = "E:/Everything artificial intelligence/loradataset\\15_ohw fkey"
keep_tokens = 0

[noise_args]

[sample_args]

[logging_args]

[general_args.args]
pretrained_model_name_or_path = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Stable-diffusion/animefull-final-pruned-fp16.safetensors"
mixed_precision = "fp16"
seed = 1
max_data_loader_n_workers = 1
persistent_data_loader_workers = true
max_token_length = 225
prior_loss_weight = 1.0
clip_skip = 2
xformers = true
cache_latents = true
vae = ""
max_train_epochs = 1

[general_args.dataset_args]
resolution = 512
batch_size = 1

[network_args.args]
network_dim = 32
network_alpha = 16.0
min_timestep = 0
max_timestep = 1000

[optimizer_args.args]
optimizer_type = "AdamW8bit"
lr_scheduler = "cosine"
learning_rate = 0.0001
max_grad_norm = 1.0
lr_scheduler_type = "LoraEasyCustomOptimizer.CustomOptimizers.CosineAnnealingWarmupRestarts"
lr_scheduler_num_cycles = 1
warmup_ratio = 0.05

[saving_args.args]
output_dir = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Lora/fkeytest2"
save_precision = "fp16"
save_model_as = "safetensors"
output_name = "fkeytest2"
save_toml = true
save_toml_location = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Lora/fkey2"

[bucket_args.dataset_args]
enable_bucket = true
min_bucket_reso = 256
max_bucket_reso = 1024
bucket_reso_steps = 64

[network_args.args.network_args]
conv_dim = 32
conv_alpha = 16.0

[optimizer_args.args.lr_scheduler_args]
min_lr = 1e-6
gamma = 0.9

[optimizer_args.args.optimizer_args]
weight_decay = "0.1"
betas = "0.9,0.99"
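
# Note on the custom scheduler args above (an assumption based on common
# CosineAnnealingWarmupRestarts implementations, not verified against the
# LoraEasyCustomOptimizer source): the learning rate warms up over
# warmup_ratio * total steps, then cosine-anneals from learning_rate (1e-4)
# down to min_lr (1e-6) within each cycle; gamma (0.9) scales the peak
# learning rate at each restart, so with lr_scheduler_num_cycles = 1 it has
# no visible effect.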