AlekseyCalvin committed on
Commit 6309d29
Parent: d874076

Upload folder using huggingface_hub

Files changed (2):
  1. config.yaml +44 -46
  2. lora.safetensors +3 -0
config.yaml CHANGED
@@ -8,52 +8,50 @@ config:
   trigger_word: HST
   network:
     type: lora
-    linear: 128
-    linear_alpha: 128
+    linear: 32
+    linear_alpha: 32
     network_kwargs:
       only_if_contains:
-        - transformer.transformer_blocks.0.norm1.linear
-        - transformer.transformer_blocks.0.norm1_context.linear
-        - transformer.transformer_blocks.0.attn.to_q
-        - transformer.transformer_blocks.0.attn.to_k
-        - transformer.transformer_blocks.0.attn.to_v
-        - transformer.transformer_blocks.0.attn.add_k_proj
-        - transformer.transformer_blocks.0.attn.add_v_proj
-        - transformer.transformer_blocks.0.attn.add_q_proj
-        - transformer.transformer_blocks.0.attn.to_out.0
-        - transformer.transformer_blocks.0.attn.to_add_out
-        - transformer.transformer_blocks.0.ff.net.0.proj
-        - transformer.transformer_blocks.0.ff.net.2
-        - transformer.transformer_blocks.0.ff_context.net.0.proj
-        - transformer.transformer_blocks.0.ff_context.net.2
-        - transformer.transformer_blocks.2.norm1.linear
-        - transformer.transformer_blocks.2.norm1_context.linear
-        - transformer.transformer_blocks.2.attn.to_q
-        - transformer.transformer_blocks.2.attn.to_k
-        - transformer.transformer_blocks.2.attn.to_v
-        - transformer.transformer_blocks.2.attn.add_k_proj
-        - transformer.transformer_blocks.2.attn.add_v_proj
-        - transformer.transformer_blocks.2.attn.add_q_proj
-        - transformer.transformer_blocks.2.attn.to_out.0
-        - transformer.transformer_blocks.2.attn.to_add_out
-        - transformer.transformer_blocks.2.ff.net.0.proj
-        - transformer.transformer_blocks.2.ff.net.2
-        - transformer.transformer_blocks.2.ff_context.net.0.proj
-        - transformer.transformer_blocks.2.ff_context.net.2
-        - transformer.transformer_blocks.18.norm1.linear
-        - transformer.transformer_blocks.18.norm1_context.linear
-        - transformer.transformer_blocks.18.attn.to_q
-        - transformer.transformer_blocks.18.attn.to_k
-        - transformer.transformer_blocks.18.attn.to_v
-        - transformer.transformer_blocks.18.attn.add_k_proj
-        - transformer.transformer_blocks.18.attn.add_v_proj
-        - transformer.transformer_blocks.18.attn.add_q_proj
-        - transformer.transformer_blocks.18.attn.to_out.0
-        - transformer.transformer_blocks.18.attn.to_add_out
-        - transformer.transformer_blocks.18.ff.net.0.proj
-        - transformer.transformer_blocks.18.ff.net.2
-        - transformer.transformer_blocks.18.ff_context.net.0.proj
-        - transformer.transformer_blocks.18.ff_context.net.2
+        - transformer.transformer_blocks.7.norm1.linear
+        - transformer.transformer_blocks.7.norm1_context.linear
+        - transformer.transformer_blocks.7.attn.to_q
+        - transformer.transformer_blocks.7.attn.to_k
+        - transformer.transformer_blocks.7.attn.to_v
+        - transformer.transformer_blocks.7.attn.add_k_proj
+        - transformer.transformer_blocks.7.attn.add_v_proj
+        - transformer.transformer_blocks.7.attn.add_q_proj
+        - transformer.transformer_blocks.7.attn.to_out.0
+        - transformer.transformer_blocks.7.attn.to_add_out
+        - transformer.transformer_blocks.7.ff.net.0.proj
+        - transformer.transformer_blocks.7.ff.net.2
+        - transformer.transformer_blocks.7.ff_context.net.0.proj
+        - transformer.transformer_blocks.7.ff_context.net.2
+        - transformer.transformer_blocks.13.norm1.linear
+        - transformer.transformer_blocks.13.norm1_context.linear
+        - transformer.transformer_blocks.13.attn.to_q
+        - transformer.transformer_blocks.13.attn.to_k
+        - transformer.transformer_blocks.13.attn.to_v
+        - transformer.transformer_blocks.13.attn.add_k_proj
+        - transformer.transformer_blocks.13.attn.add_v_proj
+        - transformer.transformer_blocks.13.attn.add_q_proj
+        - transformer.transformer_blocks.13.attn.to_out.0
+        - transformer.transformer_blocks.13.attn.to_add_out
+        - transformer.transformer_blocks.13.ff.net.0.proj
+        - transformer.transformer_blocks.13.ff.net.2
+        - transformer.transformer_blocks.13.ff_context.net.0.proj
+        - transformer.transformer_blocks.13.ff_context.net.2
+        - transformer.single_transformer_blocks.7.norm.linear
+        - transformer.single_transformer_blocks.7.proj_mlp
+        - transformer.single_transformer_blocks.7.proj_out
+        - transformer.single_transformer_blocks.7.attn.to_q
+        - transformer.single_transformer_blocks.7.attn.to_k
+        - transformer.single_transformer_blocks.7.attn.to_v
+        - transformer.single_transformer_blocks.13.norm.linear
+        - transformer.single_transformer_blocks.13.proj_mlp
+        - transformer.single_transformer_blocks.13.proj_out
+        - transformer.single_transformer_blocks.13.attn.to_q
+        - transformer.single_transformer_blocks.13.attn.to_k
+        - transformer.single_transformer_blocks.13.attn.to_v
   save:
     dtype: float16
     save_every: 501
@@ -70,7 +68,7 @@ config:
     - 768
    - 1024
   train:
-    batch_size: 1
+    batch_size: 4
     steps: 500
     gradient_accumulation_steps: 1
     train_unet: true
@@ -79,7 +77,7 @@ config:
     gradient_checkpointing: true
     noise_scheduler: flowmatch
     optimizer: adamw8bit
-    lr: 0.0008
+    lr: 0.001
    ema_config:
       use_ema: true
       ema_decay: 0.99
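For readers decoding the network hunk: in LoRA configs of this style, `linear` is the adapter rank r, `linear_alpha` scales each low-rank update by alpha / r (so 32/32, like the old 128/128, keeps a scale of 1.0 while the smaller rank shrinks adapter capacity and file size), and `only_if_contains` restricts injection to modules whose qualified names contain one of the listed substrings. A minimal PyTorch sketch of that interpretation follows; it is not the trainer's actual implementation, and `LoRALinear` / `inject_lora` are illustrative names:

import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    """A base nn.Linear plus a trainable low-rank residual update."""
    def __init__(self, base: nn.Linear, rank: int = 32, alpha: float = 32.0):
        super().__init__()
        self.base = base
        self.scale = alpha / rank                  # 32 / 32 = 1.0 for this config
        self.down = nn.Linear(base.in_features, rank, bias=False)
        self.up = nn.Linear(rank, base.out_features, bias=False)
        nn.init.zeros_(self.up.weight)             # adapter starts as a no-op

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + self.scale * self.up(self.down(x))

def inject_lora(model: nn.Module, only_if_contains: list[str],
                rank: int = 32, alpha: float = 32.0) -> int:
    """Wrap every nn.Linear whose qualified name contains a listed substring."""
    # Snapshot matches first so freshly inserted adapters are not re-wrapped.
    targets = [name for name, m in model.named_modules()
               if isinstance(m, nn.Linear)
               and any(s in name for s in only_if_contains)]
    for name in targets:
        parent = model.get_submodule(name.rsplit(".", 1)[0]) if "." in name else model
        child = name.rsplit(".", 1)[-1]
        setattr(parent, child, LoRALinear(getattr(parent, child), rank, alpha))
    return len(targets)

Run against this diff's lists, the old config would match the 14 projection layers in each of double-stream blocks 0, 2, and 18, while the new one matches double-stream blocks 7 and 13 plus single-stream blocks 7 and 13 (a double-/single-stream layout like FLUX's in diffusers naming); the rank drop from 128 to 32 is what brings the adapter file down to roughly 28 MB.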
lora.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a30dd0616b53c2b78b1e155881d84e1e0cb1212cb270321e2bb0f78cf4c6f17
+size 27929464
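The three added lines are a Git LFS pointer, not the weights themselves: the Hub stores the ~27.9 MB safetensors blob in LFS and resolves it on download. A quick sketch for sanity-checking the resolved file with `huggingface_hub` and `safetensors` (the repo id below is a placeholder for illustration; substitute the actual repository):

from huggingface_hub import hf_hub_download
from safetensors import safe_open

# Placeholder repo id; replace with the real "user/repo" for this commit.
path = hf_hub_download(repo_id="AlekseyCalvin/your-repo",
                       filename="lora.safetensors")

with safe_open(path, framework="pt") as f:
    keys = list(f.keys())
    print(len(keys), "tensors")                   # expect paired lora up/down weights
    for key in keys[:6]:                          # peek at a few names and shapes
        print(key, tuple(f.get_tensor(key).shape))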