chckpnt-mrrng committed
Commit 76ec94e · 1 Parent(s): 918da3f
feat: upload by_wieslaw_walkuski_sd15_1 lora model
by_wieslaw_walkuski_sd15_1_config/config_file.toml
ADDED
@@ -0,0 +1,65 @@
+[model_arguments]
+v2 = false
+v_parameterization = false
+pretrained_model_name_or_path = "/content/pretrained_model/Stable-Diffusion-v1-5.safetensors"
+vae = "/content/vae/stablediffusion.vae.pt"
+
+[additional_network_arguments]
+no_metadata = false
+unet_lr = 0.0001
+text_encoder_lr = 5e-5
+network_module = "networks.lora"
+network_dim = 32
+network_alpha = 16
+network_train_unet_only = false
+network_train_text_encoder_only = false
+
+[optimizer_arguments]
+optimizer_type = "AdamW8bit"
+learning_rate = 0.0001
+max_grad_norm = 1.0
+lr_scheduler = "constant"
+lr_warmup_steps = 0
+
+[dataset_arguments]
+debug_dataset = false
+in_json = "/content/LoRA/meta_lat.json"
+train_data_dir = "/content/drive/MyDrive/WIESLAW_WALKUSKI"
+dataset_repeats = 3
+shuffle_caption = true
+keep_tokens = 0
+resolution = "512,512"
+caption_dropout_rate = 0
+caption_tag_dropout_rate = 0
+caption_dropout_every_n_epochs = 0
+color_aug = false
+token_warmup_min = 1
+token_warmup_step = 0
+
+[training_arguments]
+output_dir = "/content/drive/MyDrive/LoRA/output"
+output_name = "by_wieslaw_walkuski_sd15_1"
+save_precision = "fp16"
+save_every_n_epochs = 1
+train_batch_size = 2
+max_token_length = 225
+mem_eff_attn = false
+xformers = true
+max_train_epochs = 10
+max_data_loader_n_workers = 8
+persistent_data_loader_workers = true
+gradient_checkpointing = false
+gradient_accumulation_steps = 1
+mixed_precision = "fp16"
+clip_skip = 2
+logging_dir = "/content/LoRA/logs"
+log_prefix = "by_wieslaw_walkuski_sd15_1"
+noise_offset = 0.001
+lowram = false
+
+[sample_prompt_arguments]
+sample_every_n_epochs = 1
+sample_sampler = "ddim"
+
+[saving_arguments]
+save_model_as = "safetensors"
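The TOML above follows the layout written by the Colab-style kohya-ss sd-scripts LoRA trainers: the section names are cosmetic and the key/value pairs are flattened into command-line arguments for `train_network.py`. As a rough illustration of how a `config_file.toml` and `sample_prompt.txt` pair like this one is usually launched, here is a minimal sketch; the paths, working directory, and exact invocation are assumptions about the training environment, not part of this commit.

```python
import subprocess

# Assumed /content locations, mirroring the paths used inside the config above.
config_file = "/content/LoRA/config/config_file.toml"
sample_prompt = "/content/LoRA/config/sample_prompt.txt"

subprocess.run(
    [
        "accelerate", "launch", "--num_cpu_threads_per_process=1",
        "train_network.py",
        f"--sample_prompts={sample_prompt}",  # prompts rendered during training (sample_every_n_epochs = 1)
        f"--config_file={config_file}",       # sd-scripts flattens the TOML sections into CLI arguments
    ],
    cwd="/content/kohya-trainer",             # assumed checkout containing train_network.py
    check=True,
)
```

With `sample_every_n_epochs = 1` and `sample_sampler = "ddim"`, the trainer renders the prompts from `sample_prompt.txt` after every epoch using the DDIM sampler, and each epoch's LoRA weights are saved to `output_dir` as safetensors.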
by_wieslaw_walkuski_sd15_1_config/sample_prompt.txt
ADDED
@@ -0,0 +1,2 @@
+
+masterpiece, best quality, 1girl, aqua eyes, baseball cap, blonde hair, closed mouth, earrings, green background, hat, hoop earrings, jewelry, looking at viewer, shirt, short hair, simple background, solo, upper body, yellow shirt --n lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry --w 512 --h 768 --l 7 --s 28
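In kohya's sample-prompt syntax, the trailing flags set the negative prompt (`--n`), image width and height (`--w`, `--h`), CFG scale (`--l`), and sampling steps (`--s`). The sketch below mirrors those settings with diffusers to run the finished LoRA; the base model ID, the derived weight filename, and `clip_skip` support in the pipeline call are assumptions about the inference environment, not something shipped in this commit.

```python
import torch
from diffusers import StableDiffusionPipeline

# Assumed SD 1.5 base; the training config points at a local Stable-Diffusion-v1-5 checkpoint.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
).to("cuda")

# Filename derived from output_name + save_model_as in the config; adjust to the actual upload.
pipe.load_lora_weights(
    "/content/drive/MyDrive/LoRA/output",
    weight_name="by_wieslaw_walkuski_sd15_1.safetensors",
)

image = pipe(
    # Shortened here; use the full tag lists from sample_prompt.txt above.
    prompt="masterpiece, best quality, 1girl, aqua eyes, baseball cap, blonde hair",
    negative_prompt="lowres, bad anatomy, bad hands, text, error, worst quality, low quality",
    width=512,                 # --w 512
    height=768,                # --h 768
    guidance_scale=7.0,        # --l 7
    num_inference_steps=28,    # --s 28
    clip_skip=2,               # matches clip_skip = 2 in [training_arguments]; needs a recent diffusers release
).images[0]
image.save("sample.png")
```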