End of training
- README.md +1 -1
- checkpoint-7000/optimizer.bin +3 -0
- checkpoint-7000/random_states_0.pkl +3 -0
- checkpoint-7000/scaler.pt +3 -0
- checkpoint-7000/scheduler.bin +3 -0
- checkpoint-7000/unet/config.json +67 -0
- checkpoint-7000/unet/diffusion_pytorch_model.safetensors +3 -0
- results/text2image-fine-tune/1696224359.5462363/events.out.tfevents.1696224359.e9ff7e0f3622.6376.1 +3 -0
- results/text2image-fine-tune/1696224359.5480986/hparams.yml +50 -0
- results/text2image-fine-tune/events.out.tfevents.1696224022.e9ff7e0f3622.4903.0 +2 -2
- results/text2image-fine-tune/events.out.tfevents.1696224359.e9ff7e0f3622.6376.0 +3 -0
- unet/diffusion_pytorch_model.safetensors +1 -1
- val_imgs_grid.png +0 -0
README.md
CHANGED
@@ -37,7 +37,7 @@ image.save("my_image.png")
These are the key hyperparameters used during training:

-* Epochs:
+* Epochs: 17
* Learning rate: 2e-06
* Batch size: 2
* Gradient accumulation steps: 1
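As a quick sanity check on how these numbers fit together, here is a minimal sketch (it assumes a single-GPU run, as the Colab-style `output_dir` in hparams.yml suggests; the dataset size is inferred, not reported):

```python
import math

# Values from the README / hparams.yml in this commit.
train_batch_size = 2
gradient_accumulation_steps = 1
max_train_steps = 10_000
num_train_epochs = 17

# Effective batch size per optimizer update (single process assumed).
effective_batch = train_batch_size * gradient_accumulation_steps

# The diffusers training script derives the epoch count from max_train_steps,
# so 17 epochs for 10,000 steps implies roughly this many updates per epoch
# and, in turn, roughly this many training samples.
steps_per_epoch = math.ceil(max_train_steps / num_train_epochs)   # ~589
approx_dataset_size = steps_per_epoch * effective_batch           # ~1178

print(effective_batch, steps_per_epoch, approx_dataset_size)
```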
checkpoint-7000/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1098894dee08e6edf5469d9cd2ba489b753ddf097e002ae124962a3edb781e24
+size 1725109957
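The binary itself is stored with Git LFS, so the repository only tracks this three-line pointer. Assuming a local checkout of the actual file (the path below is just that assumption), a short sketch reproduces the `oid` and `size` fields:

```python
import hashlib
import os

# Hypothetical local copy of the LFS-tracked file.
path = "checkpoint-7000/optimizer.bin"

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks to avoid loading the whole checkpoint into memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# These should match the pointer lines above.
print("oid sha256:" + sha256.hexdigest())
print("size", os.path.getsize(path))
```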
checkpoint-7000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53a2ff0bd429c0b69e9b8b53c8e8ac4cf45d5cdb772c857d1c9ebd33ddf1daa1
+size 14663
checkpoint-7000/scaler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d22aed3a858ee389297b2101bb2ed7ee9e37744f1a1f75273dbdf045d65b041a
+size 557
checkpoint-7000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fea83ff14f5e8f1da2235cf21ca8b3fdc5fd812f31508a5c7359633540e54ac7
+size 563
checkpoint-7000/unet/config.json
ADDED
@@ -0,0 +1,67 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.22.0.dev0",
+  "_name_or_path": "ekshat/stable-diffusion-anime-style",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
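This is a standard Stable Diffusion v1-style UNet configuration (4 latent channels, 768-dim cross-attention, 64×64 latent sample size). A minimal sketch of loading this intermediate checkpoint into the base pipeline named in `_name_or_path`, assuming the checkpoint directory is available locally and using a placeholder prompt:

```python
import torch
from diffusers import StableDiffusionPipeline, UNet2DConditionModel

# Load the UNet weights saved at step 7000 (local path assumed).
unet = UNet2DConditionModel.from_pretrained(
    "checkpoint-7000/unet", torch_dtype=torch.float16
)

# Swap the checkpointed UNet into the base pipeline from "_name_or_path".
pipe = StableDiffusionPipeline.from_pretrained(
    "ekshat/stable-diffusion-anime-style", unet=unet, torch_dtype=torch.float16
).to("cuda")

# Placeholder prompt; substitute your own.
image = pipe("an anime-style portrait").images[0]
image.save("my_image.png")
```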
checkpoint-7000/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b83475abbf46a313166aefe2be9a287ebcd592bdc33962563fddb63d517a3874
+size 3438167536
results/text2image-fine-tune/1696224359.5462363/events.out.tfevents.1696224359.e9ff7e0f3622.6376.1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a00720b21a25c128775d8ac110cac1d5f08da566ae601b95504eb17ab1ba0
+size 2294
results/text2image-fine-tune/1696224359.5480986/hparams.yml
ADDED
@@ -0,0 +1,50 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+caption_column: text
+center_crop: false
+checkpointing_steps: 7000
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+dataset_name: lambdalabs/naruto-blip-captions
+enable_xformers_memory_efficient_attention: true
+gradient_accumulation_steps: 1
+gradient_checkpointing: true
+hub_model_id: null
+hub_token: null
+image_column: image
+input_perturbation: 0
+learning_rate: 2.0e-06
+local_rank: -1
+logging_dir: ./results
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 10000
+mixed_precision: fp16
+noise_offset: 0
+non_ema_revision: null
+num_train_epochs: 17
+output_dir: /content/Stable_Diffussion_Anime_Style
+prediction_type: null
+pretrained_model_name_or_path: ekshat/stable-diffusion-anime-style
+push_to_hub: true
+random_flip: false
+report_to: tensorboard
+resolution: 512
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: null
+snr_gamma: null
+tracker_project_name: text2image-fine-tune
+train_batch_size: 2
+train_data_dir: null
+use_8bit_adam: true
+use_ema: false
+validation_epochs: 5
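These keys match the argument names of the standard diffusers text-to-image fine-tuning example. Under that assumption, a small sketch can rebuild the core of the launch command from this file (the script name and flag spellings are assumptions based on the key names above):

```python
import yaml

# Path to the hparams file added in this commit (local checkout assumed).
with open("results/text2image-fine-tune/1696224359.5480986/hparams.yml") as f:
    hp = yaml.safe_load(f)

# Core flags of the assumed examples/text_to_image/train_text_to_image.py run.
cmd = [
    "accelerate", "launch", "train_text_to_image.py",
    f"--pretrained_model_name_or_path={hp['pretrained_model_name_or_path']}",
    f"--dataset_name={hp['dataset_name']}",
    f"--resolution={hp['resolution']}",
    f"--train_batch_size={hp['train_batch_size']}",
    f"--gradient_accumulation_steps={hp['gradient_accumulation_steps']}",
    f"--learning_rate={hp['learning_rate']}",
    f"--lr_scheduler={hp['lr_scheduler']}",
    f"--max_train_steps={hp['max_train_steps']}",
    f"--checkpointing_steps={hp['checkpointing_steps']}",
    f"--mixed_precision={hp['mixed_precision']}",
    f"--output_dir={hp['output_dir']}",
]
# Boolean flags are passed only when enabled.
if hp.get("use_8bit_adam"):
    cmd.append("--use_8bit_adam")
if hp.get("gradient_checkpointing"):
    cmd.append("--gradient_checkpointing")
if hp.get("enable_xformers_memory_efficient_attention"):
    cmd.append("--enable_xformers_memory_efficient_attention")

print(" ".join(cmd))
```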
results/text2image-fine-tune/events.out.tfevents.1696224022.e9ff7e0f3622.4903.0
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:944736cf73a7661ee8b5217031b0b84a610b5ce902a3ab684e85108561144163
+size 335669
results/text2image-fine-tune/events.out.tfevents.1696224359.e9ff7e0f3622.6376.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20ece6a436996fa9779245783ce8d35f8f36e2823eaa884ab02352d3b5f7832f
+size 1870939
unet/diffusion_pytorch_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2b5422b668b7f1e75a63e2c9b01cb2f90ac7ff2afbd30920b1f1923d1a8e87af
size 3438167536
val_imgs_grid.png
CHANGED