c14kevincardenas committed
Commit 8241baa (verified)
1 Parent(s): 4518c05

Training in progress, epoch 1

Files changed (26)
  1. README.md +83 -0
  2. all_results.json +13 -0
  3. config.json +93 -0
  4. eval_results.json +8 -0
  5. model.safetensors +3 -0
  6. preprocessor_config.json +28 -0
  7. runs/Feb10_23-07-07_galactica.ad.cirange.net/events.out.tfevents.1739228898.galactica.ad.cirange.net.2112498.0 +3 -0
  8. runs/Feb10_23-10-00_galactica.ad.cirange.net/events.out.tfevents.1739229005.galactica.ad.cirange.net.2113898.0 +3 -0
  9. runs/Feb10_23-15-46_galactica.ad.cirange.net/events.out.tfevents.1739229351.galactica.ad.cirange.net.2114556.0 +3 -0
  10. runs/Feb10_23-15-46_galactica.ad.cirange.net/events.out.tfevents.1739233102.galactica.ad.cirange.net.2114556.1 +3 -0
  11. runs/Feb11_16-52-08_galactica.ad.cirange.net/events.out.tfevents.1739292750.galactica.ad.cirange.net.2142376.0 +3 -0
  12. runs/Feb11_16-52-08_galactica.ad.cirange.net/events.out.tfevents.1739296199.galactica.ad.cirange.net.2142376.1 +3 -0
  13. runs/Feb11_17-51-12_galactica.ad.cirange.net/events.out.tfevents.1739296278.galactica.ad.cirange.net.2161252.0 +3 -0
  14. runs/Feb12_20-38-04_galactica.ad.cirange.net/events.out.tfevents.1739392689.galactica.ad.cirange.net.2335522.0 +3 -0
  15. runs/Feb12_20-38-04_galactica.ad.cirange.net/events.out.tfevents.1739396347.galactica.ad.cirange.net.2335522.1 +3 -0
  16. runs/Feb12_22-40-50_galactica.ad.cirange.net/events.out.tfevents.1739400056.galactica.ad.cirange.net.2354828.0 +3 -0
  17. runs/Feb12_22-40-50_galactica.ad.cirange.net/events.out.tfevents.1739403709.galactica.ad.cirange.net.2354828.1 +3 -0
  18. runs/Feb18_14-54-27_galactica.ad.cirange.net/events.out.tfevents.1739890472.galactica.ad.cirange.net.2653462.0 +3 -0
  19. runs/Feb18_14-54-27_galactica.ad.cirange.net/events.out.tfevents.1739894315.galactica.ad.cirange.net.2653462.1 +3 -0
  20. runs/Feb18_22-12-13_galactica.ad.cirange.net/events.out.tfevents.1739916738.galactica.ad.cirange.net.2675420.0 +3 -0
  21. runs/Feb18_22-12-13_galactica.ad.cirange.net/events.out.tfevents.1739920410.galactica.ad.cirange.net.2675420.1 +3 -0
  22. runs/Feb18_23-22-11_galactica.ad.cirange.net/events.out.tfevents.1739920937.galactica.ad.cirange.net.2694498.0 +3 -0
  23. runs/Feb18_23-24-01_galactica.ad.cirange.net/events.out.tfevents.1739921046.galactica.ad.cirange.net.2696018.0 +3 -0
  24. train_results.json +8 -0
  25. trainer_state.json +719 -0
  26. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,83 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: microsoft/beit-large-patch16-384
+ tags:
+ - image-regression
+ - human-movement
+ - vision
+ - generated_from_trainer
+ model-index:
+ - name: limbxy_pose
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # limbxy_pose
+
+ This model is a fine-tuned version of [microsoft/beit-large-patch16-384](https://huggingface.co/microsoft/beit-large-patch16-384) on the c14kevincardenas/beta_caller_284_limbxy_pose dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1399
+ - Rmse: 0.3740
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 64
+ - eval_batch_size: 64
+ - seed: 2014
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 250
+ - num_epochs: 20.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rmse |
+ |:-------------:|:-----:|:----:|:---------------:|:------:|
+ | 0.2029 | 1.0 | 89 | 0.2039 | 0.4516 |
+ | 0.1535 | 2.0 | 178 | 0.1545 | 0.3931 |
+ | 0.1847 | 3.0 | 267 | 0.1640 | 0.4050 |
+ | 0.1582 | 4.0 | 356 | 0.1427 | 0.3777 |
+ | 0.1471 | 5.0 | 445 | 0.1427 | 0.3778 |
+ | 0.1553 | 6.0 | 534 | 0.1469 | 0.3833 |
+ | 0.1613 | 7.0 | 623 | 0.1430 | 0.3782 |
+ | 0.1471 | 8.0 | 712 | 0.1427 | 0.3778 |
+ | 0.1587 | 9.0 | 801 | 0.1417 | 0.3764 |
+ | 0.1504 | 10.0 | 890 | 0.1427 | 0.3777 |
+ | 0.1431 | 11.0 | 979 | 0.1429 | 0.3780 |
+ | 0.1455 | 12.0 | 1068 | 0.1442 | 0.3797 |
+ | 0.1515 | 13.0 | 1157 | 0.1431 | 0.3783 |
+ | 0.1407 | 14.0 | 1246 | 0.1443 | 0.3799 |
+ | 0.1436 | 15.0 | 1335 | 0.1419 | 0.3768 |
+ | 0.1425 | 16.0 | 1424 | 0.1399 | 0.3740 |
+ | 0.1411 | 17.0 | 1513 | 0.1401 | 0.3743 |
+ | 0.1401 | 18.0 | 1602 | 0.1408 | 0.3752 |
+ | 0.1426 | 19.0 | 1691 | 0.1406 | 0.3750 |
+ | 0.1388 | 20.0 | 1780 | 0.1400 | 0.3742 |
+
+
+ ### Framework versions
+
+ - Transformers 4.45.2
+ - Pytorch 2.5.0+cu124
+ - Datasets 3.0.1
+ - Tokenizers 0.20.1
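
The checkpoint uses a custom `LimbXYModel` head (see `config.json` below, where `model_type` is `custom_model`), so it will not load through `AutoModel` without the accompanying training code. The following is a minimal sketch for fetching and inspecting the committed artifacts; the repo id `c14kevincardenas/limbxy_pose` and the example image path are assumptions.

```python
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from transformers import AutoImageProcessor
from PIL import Image

repo_id = "c14kevincardenas/limbxy_pose"  # assumed repo id for this model card

# Standard BEiT preprocessing (384x384, mean/std 0.5), as declared in preprocessor_config.json.
processor = AutoImageProcessor.from_pretrained(repo_id)
pixel_values = processor(Image.open("climber.jpg").convert("RGB"), return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 384, 384])

# The weights are a plain safetensors state dict; the LimbXYModel class that maps BEiT
# features to the four limb (x, y) targets has to come from the original training code.
state_dict = load_file(hf_hub_download(repo_id, "model.safetensors"))
print(len(state_dict), "tensors in checkpoint")
```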
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "epoch": 20.0,
+ "eval_loss": 0.1398855596780777,
+ "eval_rmse": 0.3740127682685852,
+ "eval_runtime": 10.0622,
+ "eval_samples_per_second": 99.382,
+ "eval_steps_per_second": 1.59,
+ "total_flos": 0.0,
+ "train_loss": 0.15538578709859527,
+ "train_runtime": 3316.8972,
+ "train_samples_per_second": 34.14,
+ "train_steps_per_second": 0.537
+ }
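
One consistency check worth noting: `eval_rmse` is the square root of `eval_loss`, which suggests the evaluation loss is the mean squared error over the regressed coordinates. A quick verification with the values above:

```python
import math

eval_loss = 0.1398855596780777
eval_rmse = 0.3740127682685852

# RMSE should be sqrt(MSE); the tiny residual difference is float32 rounding.
assert math.isclose(math.sqrt(eval_loss), eval_rmse, rel_tol=1e-6)
print(math.sqrt(eval_loss))  # ~0.374013
```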
config.json ADDED
@@ -0,0 +1,93 @@
+ {
+ "_name_or_path": "c14kevincardenas/beit-large-patch16-384-limb",
+ "add_fpn": false,
+ "architectures": [
+ "LimbXYModel"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "auxiliary_channels": 256,
+ "auxiliary_concat_input": false,
+ "auxiliary_loss_weight": 0.4,
+ "auxiliary_num_convs": 1,
+ "d_model": 1024,
+ "drop_path_rate": 0.1,
+ "finetuning_task": "regression",
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "left_foot",
+ "1": "left_hand",
+ "2": "right_foot",
+ "3": "right_hand"
+ },
+ "image_size": 384,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "left_foot": "0",
+ "left_hand": "1",
+ "right_foot": "2",
+ "right_hand": "3"
+ },
+ "layer_norm_eps": 1e-12,
+ "layer_scale_init_value": 0.1,
+ "model_type": "custom_model",
+ "nhead": 2,
+ "num_attention_heads": 16,
+ "num_channels": 3,
+ "num_classes": 4,
+ "num_hidden_layers": 24,
+ "num_layers": 1,
+ "out_features": [
+ "stage24"
+ ],
+ "out_indices": [
+ 24
+ ],
+ "patch_size": 16,
+ "pool_scales": [
+ 1,
+ 2,
+ 3,
+ 6
+ ],
+ "reshape_hidden_states": true,
+ "semantic_loss_ignore_index": 255,
+ "stage_names": [
+ "stem",
+ "stage1",
+ "stage2",
+ "stage3",
+ "stage4",
+ "stage5",
+ "stage6",
+ "stage7",
+ "stage8",
+ "stage9",
+ "stage10",
+ "stage11",
+ "stage12",
+ "stage13",
+ "stage14",
+ "stage15",
+ "stage16",
+ "stage17",
+ "stage18",
+ "stage19",
+ "stage20",
+ "stage21",
+ "stage22",
+ "stage23",
+ "stage24"
+ ],
+ "torch_dtype": "float32",
+ "transformers_version": "4.45.2",
+ "use_absolute_position_embeddings": false,
+ "use_auxiliary_head": true,
+ "use_mask_token": false,
+ "use_mean_pooling": true,
+ "use_relative_position_bias": true,
+ "use_shared_relative_position_bias": false,
+ "vocab_size": 8192
+ }
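
Because `model_type` is `custom_model` and the `LimbXYModel` architecture does not ship with Transformers 4.45.2, `AutoConfig`/`AutoModel` will not resolve this config on their own. A small sketch for inspecting the relevant fields from a local copy of the file (the local path is an assumption):

```python
import json

with open("config.json") as f:  # local copy of the file committed above
    cfg = json.load(f)

# Four regression targets, presumably one (x, y) pair per limb.
print(cfg["architectures"], cfg["model_type"])  # ['LimbXYModel'] custom_model
print(cfg["id2label"])                          # left/right foot and hand
print(cfg["hidden_size"], cfg["image_size"], cfg["num_hidden_layers"])  # 1024 384 24
```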
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 20.0,
+ "eval_loss": 0.1398855596780777,
+ "eval_rmse": 0.3740127682685852,
+ "eval_runtime": 10.0622,
+ "eval_samples_per_second": 99.382,
+ "eval_steps_per_second": 1.59
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:661c13f6f604ca390b88a7927d7455358092ea5dd03c91487c20a19996a96eaa
+ size 1216514232
preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "crop_size": {
+ "height": 224,
+ "width": 224
+ },
+ "do_center_crop": false,
+ "do_normalize": true,
+ "do_reduce_labels": false,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "BeitImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 384,
+ "width": 384
+ }
+ }
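
These settings amount to standard BEiT preprocessing: resize to 384x384 with bilinear resampling (`resample: 2`), rescale by 1/255, and normalize each channel with mean and std 0.5; center cropping is disabled. A sketch of an equivalent processor built directly from the values above, rather than fetched from the hub:

```python
from transformers import BeitImageProcessor

processor = BeitImageProcessor(
    do_resize=True,
    size={"height": 384, "width": 384},
    resample=2,                # PIL bilinear
    do_rescale=True,
    rescale_factor=1 / 255,    # 0.00392156862745098
    do_normalize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
    do_center_crop=False,
    do_reduce_labels=False,
)
# processor(image, return_tensors="pt").pixel_values -> shape (1, 3, 384, 384)
```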
runs/Feb10_23-07-07_galactica.ad.cirange.net/events.out.tfevents.1739228898.galactica.ad.cirange.net.2112498.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e4a1f116e1b974c39f6ddf45344045518a253a8a117f9896924da68b37282e2
+ size 4184
runs/Feb10_23-10-00_galactica.ad.cirange.net/events.out.tfevents.1739229005.galactica.ad.cirange.net.2113898.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8f48b106359e01dc30a8a4e0ea20fe0e07793fd94356e3b30ff4de07a6a407c
+ size 6182
runs/Feb10_23-15-46_galactica.ad.cirange.net/events.out.tfevents.1739229351.galactica.ad.cirange.net.2114556.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eda76ce9f8d63dba49c6dcebd293cfd33bb4b8e7bb1142d9a7f302740db976bf
+ size 27871
runs/Feb10_23-15-46_galactica.ad.cirange.net/events.out.tfevents.1739233102.galactica.ad.cirange.net.2114556.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e43153af21be343e3651916c979e0de48228371b8a7dd3574c4188beeba3184d
+ size 407
runs/Feb11_16-52-08_galactica.ad.cirange.net/events.out.tfevents.1739292750.galactica.ad.cirange.net.2142376.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9fe7be42a686fdfe0ccb693bbccdebfc87e46df84e028d4ff2ebfbb1699e5d3
+ size 96795
runs/Feb11_16-52-08_galactica.ad.cirange.net/events.out.tfevents.1739296199.galactica.ad.cirange.net.2142376.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:249757ef4c7779c7620c949c5882d1147b375c8b7148f532b0e98901b1e31497
+ size 407
runs/Feb11_17-51-12_galactica.ad.cirange.net/events.out.tfevents.1739296278.galactica.ad.cirange.net.2161252.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8774773e3c8c0691e9590b123573c3aeb247508a4310e61e80f643f72e825ac5
+ size 81425
runs/Feb12_20-38-04_galactica.ad.cirange.net/events.out.tfevents.1739392689.galactica.ad.cirange.net.2335522.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2562508385b503720e9dffe4c32ef9e2844fa57a3e2e88682fe0462b461e2b53
+ size 27871
runs/Feb12_20-38-04_galactica.ad.cirange.net/events.out.tfevents.1739396347.galactica.ad.cirange.net.2335522.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:154aecb3afb417181ff3d50a3f1b141f854819774859f67958f5623a1160745d
+ size 407
runs/Feb12_22-40-50_galactica.ad.cirange.net/events.out.tfevents.1739400056.galactica.ad.cirange.net.2354828.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a7a134eccf52b8a7bdc23533421d9bbb07b94b4ec8f806e41a463911cf5dfd7
+ size 27871
runs/Feb12_22-40-50_galactica.ad.cirange.net/events.out.tfevents.1739403709.galactica.ad.cirange.net.2354828.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:120052f23d2ed545abb5a1692a6fca5efc0f5158416a4e2513625f7494af81f8
+ size 407
runs/Feb18_14-54-27_galactica.ad.cirange.net/events.out.tfevents.1739890472.galactica.ad.cirange.net.2653462.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7946804b99cd3e153e1ef102af2a03240c10518d697fa5b87f42535d10974c7d
+ size 27871
runs/Feb18_14-54-27_galactica.ad.cirange.net/events.out.tfevents.1739894315.galactica.ad.cirange.net.2653462.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3b5023a9ffdd0624d9ef3020d2ec638857c147eabbfd31a1ef87652339a7554
+ size 407
runs/Feb18_22-12-13_galactica.ad.cirange.net/events.out.tfevents.1739916738.galactica.ad.cirange.net.2675420.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75b5293e9103111a9bc3801d6e72b57c7b0f7bbcbe84984df7a661f112a4cf8f
+ size 96795
runs/Feb18_22-12-13_galactica.ad.cirange.net/events.out.tfevents.1739920410.galactica.ad.cirange.net.2675420.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:693c817e84395f167212910ef0a54c11d1b8b87f03cb521ffb49437891e80ede
+ size 407
runs/Feb18_23-22-11_galactica.ad.cirange.net/events.out.tfevents.1739920937.galactica.ad.cirange.net.2694498.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41e5f5f3def15ee7f85d9a0a1432fa9414cc8109b16559dff1f3a45ae3fe381f
+ size 4184
runs/Feb18_23-24-01_galactica.ad.cirange.net/events.out.tfevents.1739921046.galactica.ad.cirange.net.2696018.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8641b6c6acf5053c96bc1ddd5884ae2af5a67e5565e381f3b61667709926d59c
+ size 7141
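
The `events.out.tfevents.*` files above are TensorBoard logs from the individual training runs. One way to browse them, assuming the `runs/` directory has been downloaded locally, is to launch TensorBoard programmatically:

```python
from tensorboard import program

# Point TensorBoard at the downloaded runs/ directory from this commit.
tb = program.TensorBoard()
tb.configure(argv=[None, "--logdir", "runs"])
print("TensorBoard listening at", tb.launch())
```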
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 20.0,
+ "total_flos": 0.0,
+ "train_loss": 0.15538578709859527,
+ "train_runtime": 3316.8972,
+ "train_samples_per_second": 34.14,
+ "train_steps_per_second": 0.537
+ }
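
These throughput numbers line up with the trainer state below: 1780 optimizer steps over 20 epochs is 89 steps per epoch, which at a batch size of 64 bounds the training set at 5,696 samples, consistent with the measured samples-per-second. A quick sanity check:

```python
train_runtime = 3316.8972             # seconds
train_samples_per_second = 34.14
train_steps_per_second = 0.537
max_steps, num_epochs, batch_size = 1780, 20, 64

print(max_steps / num_epochs)                                        # 89.0 steps per epoch
print(max_steps // num_epochs * batch_size)                          # 5696, upper bound on train set size
print(round(train_steps_per_second * train_runtime))                 # ~1781, i.e. max_steps
print(round(train_samples_per_second * train_runtime / num_epochs))  # ~5662 samples per epoch
```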
trainer_state.json ADDED
@@ -0,0 +1,719 @@
1
+ {
2
+ "best_metric": 0.1398855596780777,
3
+ "best_model_checkpoint": "limbxy_pose/checkpoint-1424",
4
+ "epoch": 20.0,
5
+ "eval_steps": 500,
6
+ "global_step": 1780,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.2808988764044944,
13
+ "grad_norm": 28418.23828125,
14
+ "learning_rate": 5e-06,
15
+ "loss": 0.3282,
16
+ "step": 25
17
+ },
18
+ {
19
+ "epoch": 0.5617977528089888,
20
+ "grad_norm": 988445.875,
21
+ "learning_rate": 1e-05,
22
+ "loss": 0.2354,
23
+ "step": 50
24
+ },
25
+ {
26
+ "epoch": 0.8426966292134831,
27
+ "grad_norm": 3329800.75,
28
+ "learning_rate": 1.5e-05,
29
+ "loss": 0.2029,
30
+ "step": 75
31
+ },
32
+ {
33
+ "epoch": 1.0,
34
+ "eval_loss": 0.2039479911327362,
35
+ "eval_rmse": 0.45160600543022156,
36
+ "eval_runtime": 9.907,
37
+ "eval_samples_per_second": 100.939,
38
+ "eval_steps_per_second": 1.615,
39
+ "step": 89
40
+ },
41
+ {
42
+ "epoch": 1.1235955056179776,
43
+ "grad_norm": 1523742.25,
44
+ "learning_rate": 2e-05,
45
+ "loss": 0.205,
46
+ "step": 100
47
+ },
48
+ {
49
+ "epoch": 1.404494382022472,
50
+ "grad_norm": 2450783.75,
51
+ "learning_rate": 2.5e-05,
52
+ "loss": 0.1895,
53
+ "step": 125
54
+ },
55
+ {
56
+ "epoch": 1.6853932584269664,
57
+ "grad_norm": 2619566.25,
58
+ "learning_rate": 3e-05,
59
+ "loss": 0.1772,
60
+ "step": 150
61
+ },
62
+ {
63
+ "epoch": 1.9662921348314608,
64
+ "grad_norm": 2945143.25,
65
+ "learning_rate": 3.5e-05,
66
+ "loss": 0.1535,
67
+ "step": 175
68
+ },
69
+ {
70
+ "epoch": 2.0,
71
+ "eval_loss": 0.1544913649559021,
72
+ "eval_rmse": 0.39305388927459717,
73
+ "eval_runtime": 9.8727,
74
+ "eval_samples_per_second": 101.29,
75
+ "eval_steps_per_second": 1.621,
76
+ "step": 178
77
+ },
78
+ {
79
+ "epoch": 2.247191011235955,
80
+ "grad_norm": 2628555.25,
81
+ "learning_rate": 4e-05,
82
+ "loss": 0.1539,
83
+ "step": 200
84
+ },
85
+ {
86
+ "epoch": 2.5280898876404496,
87
+ "grad_norm": 4395509.5,
88
+ "learning_rate": 4.5e-05,
89
+ "loss": 0.1725,
90
+ "step": 225
91
+ },
92
+ {
93
+ "epoch": 2.808988764044944,
94
+ "grad_norm": 1393026.125,
95
+ "learning_rate": 5e-05,
96
+ "loss": 0.1847,
97
+ "step": 250
98
+ },
99
+ {
100
+ "epoch": 3.0,
101
+ "eval_loss": 0.16403795778751373,
102
+ "eval_rmse": 0.4050160050392151,
103
+ "eval_runtime": 9.6573,
104
+ "eval_samples_per_second": 103.548,
105
+ "eval_steps_per_second": 1.657,
106
+ "step": 267
107
+ },
108
+ {
109
+ "epoch": 3.0898876404494384,
110
+ "grad_norm": 1010271.875,
111
+ "learning_rate": 4.918300653594771e-05,
112
+ "loss": 0.1705,
113
+ "step": 275
114
+ },
115
+ {
116
+ "epoch": 3.370786516853933,
117
+ "grad_norm": 2326091.75,
118
+ "learning_rate": 4.8366013071895424e-05,
119
+ "loss": 0.1742,
120
+ "step": 300
121
+ },
122
+ {
123
+ "epoch": 3.6516853932584272,
124
+ "grad_norm": 1241515.75,
125
+ "learning_rate": 4.7549019607843135e-05,
126
+ "loss": 0.1615,
127
+ "step": 325
128
+ },
129
+ {
130
+ "epoch": 3.932584269662921,
131
+ "grad_norm": 3322754.5,
132
+ "learning_rate": 4.673202614379085e-05,
133
+ "loss": 0.1582,
134
+ "step": 350
135
+ },
136
+ {
137
+ "epoch": 4.0,
138
+ "eval_loss": 0.14265377819538116,
139
+ "eval_rmse": 0.37769538164138794,
140
+ "eval_runtime": 9.6722,
141
+ "eval_samples_per_second": 103.389,
142
+ "eval_steps_per_second": 1.654,
143
+ "step": 356
144
+ },
145
+ {
146
+ "epoch": 4.213483146067416,
147
+ "grad_norm": 106705.4921875,
148
+ "learning_rate": 4.5915032679738564e-05,
149
+ "loss": 0.1556,
150
+ "step": 375
151
+ },
152
+ {
153
+ "epoch": 4.49438202247191,
154
+ "grad_norm": 1935592.875,
155
+ "learning_rate": 4.5098039215686275e-05,
156
+ "loss": 0.1517,
157
+ "step": 400
158
+ },
159
+ {
160
+ "epoch": 4.775280898876405,
161
+ "grad_norm": 141842.828125,
162
+ "learning_rate": 4.4281045751633986e-05,
163
+ "loss": 0.1471,
164
+ "step": 425
165
+ },
166
+ {
167
+ "epoch": 5.0,
168
+ "eval_loss": 0.14273402094841003,
169
+ "eval_rmse": 0.3778015673160553,
170
+ "eval_runtime": 9.9561,
171
+ "eval_samples_per_second": 100.441,
172
+ "eval_steps_per_second": 1.607,
173
+ "step": 445
174
+ },
175
+ {
176
+ "epoch": 5.056179775280899,
177
+ "grad_norm": 184961.796875,
178
+ "learning_rate": 4.3464052287581704e-05,
179
+ "loss": 0.151,
180
+ "step": 450
181
+ },
182
+ {
183
+ "epoch": 5.337078651685394,
184
+ "grad_norm": 91365.3984375,
185
+ "learning_rate": 4.2647058823529415e-05,
186
+ "loss": 0.1486,
187
+ "step": 475
188
+ },
189
+ {
190
+ "epoch": 5.617977528089888,
191
+ "grad_norm": 1522296.75,
192
+ "learning_rate": 4.1830065359477126e-05,
193
+ "loss": 0.1464,
194
+ "step": 500
195
+ },
196
+ {
197
+ "epoch": 5.898876404494382,
198
+ "grad_norm": 1252876.625,
199
+ "learning_rate": 4.101307189542484e-05,
200
+ "loss": 0.1553,
201
+ "step": 525
202
+ },
203
+ {
204
+ "epoch": 6.0,
205
+ "eval_loss": 0.14694415032863617,
206
+ "eval_rmse": 0.3833329379558563,
207
+ "eval_runtime": 9.5748,
208
+ "eval_samples_per_second": 104.441,
209
+ "eval_steps_per_second": 1.671,
210
+ "step": 534
211
+ },
212
+ {
213
+ "epoch": 6.179775280898877,
214
+ "grad_norm": 803999.8125,
215
+ "learning_rate": 4.0196078431372555e-05,
216
+ "loss": 0.1638,
217
+ "step": 550
218
+ },
219
+ {
220
+ "epoch": 6.460674157303371,
221
+ "grad_norm": 3250220.25,
222
+ "learning_rate": 3.9379084967320266e-05,
223
+ "loss": 0.1676,
224
+ "step": 575
225
+ },
226
+ {
227
+ "epoch": 6.741573033707866,
228
+ "grad_norm": 854642.25,
229
+ "learning_rate": 3.8562091503267977e-05,
230
+ "loss": 0.1613,
231
+ "step": 600
232
+ },
233
+ {
234
+ "epoch": 7.0,
235
+ "eval_loss": 0.14302612841129303,
236
+ "eval_rmse": 0.37818795442581177,
237
+ "eval_runtime": 9.6944,
238
+ "eval_samples_per_second": 103.152,
239
+ "eval_steps_per_second": 1.65,
240
+ "step": 623
241
+ },
242
+ {
243
+ "epoch": 7.022471910112359,
244
+ "grad_norm": 3126045.0,
245
+ "learning_rate": 3.774509803921569e-05,
246
+ "loss": 0.1509,
247
+ "step": 625
248
+ },
249
+ {
250
+ "epoch": 7.303370786516854,
251
+ "grad_norm": 986060.1875,
252
+ "learning_rate": 3.6928104575163405e-05,
253
+ "loss": 0.1589,
254
+ "step": 650
255
+ },
256
+ {
257
+ "epoch": 7.584269662921348,
258
+ "grad_norm": 965071.25,
259
+ "learning_rate": 3.611111111111111e-05,
260
+ "loss": 0.157,
261
+ "step": 675
262
+ },
263
+ {
264
+ "epoch": 7.865168539325842,
265
+ "grad_norm": 262608.5,
266
+ "learning_rate": 3.529411764705883e-05,
267
+ "loss": 0.1471,
268
+ "step": 700
269
+ },
270
+ {
271
+ "epoch": 8.0,
272
+ "eval_loss": 0.14273059368133545,
273
+ "eval_rmse": 0.37779703736305237,
274
+ "eval_runtime": 9.7952,
275
+ "eval_samples_per_second": 102.091,
276
+ "eval_steps_per_second": 1.633,
277
+ "step": 712
278
+ },
279
+ {
280
+ "epoch": 8.146067415730338,
281
+ "grad_norm": 146225.703125,
282
+ "learning_rate": 3.447712418300654e-05,
283
+ "loss": 0.1518,
284
+ "step": 725
285
+ },
286
+ {
287
+ "epoch": 8.426966292134832,
288
+ "grad_norm": 844252.125,
289
+ "learning_rate": 3.366013071895425e-05,
290
+ "loss": 0.1464,
291
+ "step": 750
292
+ },
293
+ {
294
+ "epoch": 8.707865168539326,
295
+ "grad_norm": 2803406.0,
296
+ "learning_rate": 3.284313725490196e-05,
297
+ "loss": 0.1617,
298
+ "step": 775
299
+ },
300
+ {
301
+ "epoch": 8.98876404494382,
302
+ "grad_norm": 524439.0,
303
+ "learning_rate": 3.202614379084967e-05,
304
+ "loss": 0.1587,
305
+ "step": 800
306
+ },
307
+ {
308
+ "epoch": 9.0,
309
+ "eval_loss": 0.1416771560907364,
310
+ "eval_rmse": 0.37640026211738586,
311
+ "eval_runtime": 9.8664,
312
+ "eval_samples_per_second": 101.354,
313
+ "eval_steps_per_second": 1.622,
314
+ "step": 801
315
+ },
316
+ {
317
+ "epoch": 9.269662921348315,
318
+ "grad_norm": 1690231.25,
319
+ "learning_rate": 3.120915032679739e-05,
320
+ "loss": 0.1442,
321
+ "step": 825
322
+ },
323
+ {
324
+ "epoch": 9.55056179775281,
325
+ "grad_norm": 108342.3203125,
326
+ "learning_rate": 3.0392156862745097e-05,
327
+ "loss": 0.1457,
328
+ "step": 850
329
+ },
330
+ {
331
+ "epoch": 9.831460674157304,
332
+ "grad_norm": 2507031.5,
333
+ "learning_rate": 2.957516339869281e-05,
334
+ "loss": 0.1504,
335
+ "step": 875
336
+ },
337
+ {
338
+ "epoch": 10.0,
339
+ "eval_loss": 0.14268024265766144,
340
+ "eval_rmse": 0.3777303695678711,
341
+ "eval_runtime": 9.7272,
342
+ "eval_samples_per_second": 102.805,
343
+ "eval_steps_per_second": 1.645,
344
+ "step": 890
345
+ },
346
+ {
347
+ "epoch": 10.112359550561798,
348
+ "grad_norm": 1206485.625,
349
+ "learning_rate": 2.8758169934640522e-05,
350
+ "loss": 0.1543,
351
+ "step": 900
352
+ },
353
+ {
354
+ "epoch": 10.393258426966293,
355
+ "grad_norm": 744084.75,
356
+ "learning_rate": 2.7941176470588236e-05,
357
+ "loss": 0.1492,
358
+ "step": 925
359
+ },
360
+ {
361
+ "epoch": 10.674157303370787,
362
+ "grad_norm": 185424.90625,
363
+ "learning_rate": 2.7124183006535947e-05,
364
+ "loss": 0.1419,
365
+ "step": 950
366
+ },
367
+ {
368
+ "epoch": 10.955056179775282,
369
+ "grad_norm": 1401596.125,
370
+ "learning_rate": 2.630718954248366e-05,
371
+ "loss": 0.1431,
372
+ "step": 975
373
+ },
374
+ {
375
+ "epoch": 11.0,
376
+ "eval_loss": 0.14292022585868835,
377
+ "eval_rmse": 0.3780479431152344,
378
+ "eval_runtime": 9.6673,
379
+ "eval_samples_per_second": 103.441,
380
+ "eval_steps_per_second": 1.655,
381
+ "step": 979
382
+ },
383
+ {
384
+ "epoch": 11.235955056179776,
385
+ "grad_norm": 110806.1015625,
386
+ "learning_rate": 2.5490196078431373e-05,
387
+ "loss": 0.1395,
388
+ "step": 1000
389
+ },
390
+ {
391
+ "epoch": 11.51685393258427,
392
+ "grad_norm": 802002.3125,
393
+ "learning_rate": 2.4673202614379087e-05,
394
+ "loss": 0.1452,
395
+ "step": 1025
396
+ },
397
+ {
398
+ "epoch": 11.797752808988765,
399
+ "grad_norm": 472430.40625,
400
+ "learning_rate": 2.38562091503268e-05,
401
+ "loss": 0.1455,
402
+ "step": 1050
403
+ },
404
+ {
405
+ "epoch": 12.0,
406
+ "eval_loss": 0.14419254660606384,
407
+ "eval_rmse": 0.37972694635391235,
408
+ "eval_runtime": 10.0288,
409
+ "eval_samples_per_second": 99.713,
410
+ "eval_steps_per_second": 1.595,
411
+ "step": 1068
412
+ },
413
+ {
414
+ "epoch": 12.07865168539326,
415
+ "grad_norm": 151092.5625,
416
+ "learning_rate": 2.303921568627451e-05,
417
+ "loss": 0.145,
418
+ "step": 1075
419
+ },
420
+ {
421
+ "epoch": 12.359550561797754,
422
+ "grad_norm": 722759.6875,
423
+ "learning_rate": 2.2222222222222223e-05,
424
+ "loss": 0.1412,
425
+ "step": 1100
426
+ },
427
+ {
428
+ "epoch": 12.640449438202246,
429
+ "grad_norm": 753801.25,
430
+ "learning_rate": 2.1405228758169934e-05,
431
+ "loss": 0.1411,
432
+ "step": 1125
433
+ },
434
+ {
435
+ "epoch": 12.921348314606742,
436
+ "grad_norm": 1153443.5,
437
+ "learning_rate": 2.058823529411765e-05,
438
+ "loss": 0.1515,
439
+ "step": 1150
440
+ },
441
+ {
442
+ "epoch": 13.0,
443
+ "eval_loss": 0.14310802519321442,
444
+ "eval_rmse": 0.3782961964607239,
445
+ "eval_runtime": 9.8073,
446
+ "eval_samples_per_second": 101.965,
447
+ "eval_steps_per_second": 1.631,
448
+ "step": 1157
449
+ },
450
+ {
451
+ "epoch": 13.202247191011235,
452
+ "grad_norm": 1710282.25,
453
+ "learning_rate": 1.977124183006536e-05,
454
+ "loss": 0.1529,
455
+ "step": 1175
456
+ },
457
+ {
458
+ "epoch": 13.48314606741573,
459
+ "grad_norm": 1276391.0,
460
+ "learning_rate": 1.895424836601307e-05,
461
+ "loss": 0.1454,
462
+ "step": 1200
463
+ },
464
+ {
465
+ "epoch": 13.764044943820224,
466
+ "grad_norm": 521072.875,
467
+ "learning_rate": 1.8137254901960785e-05,
468
+ "loss": 0.1407,
469
+ "step": 1225
470
+ },
471
+ {
472
+ "epoch": 14.0,
473
+ "eval_loss": 0.14430594444274902,
474
+ "eval_rmse": 0.3798762559890747,
475
+ "eval_runtime": 9.7095,
476
+ "eval_samples_per_second": 102.992,
477
+ "eval_steps_per_second": 1.648,
478
+ "step": 1246
479
+ },
480
+ {
481
+ "epoch": 14.044943820224718,
482
+ "grad_norm": 567394.125,
483
+ "learning_rate": 1.7320261437908496e-05,
484
+ "loss": 0.1426,
485
+ "step": 1250
486
+ },
487
+ {
488
+ "epoch": 14.325842696629213,
489
+ "grad_norm": 862934.4375,
490
+ "learning_rate": 1.650326797385621e-05,
491
+ "loss": 0.1399,
492
+ "step": 1275
493
+ },
494
+ {
495
+ "epoch": 14.606741573033707,
496
+ "grad_norm": 449050.59375,
497
+ "learning_rate": 1.568627450980392e-05,
498
+ "loss": 0.1418,
499
+ "step": 1300
500
+ },
501
+ {
502
+ "epoch": 14.887640449438202,
503
+ "grad_norm": 345541.65625,
504
+ "learning_rate": 1.4869281045751634e-05,
505
+ "loss": 0.1436,
506
+ "step": 1325
507
+ },
508
+ {
509
+ "epoch": 15.0,
510
+ "eval_loss": 0.1419454663991928,
511
+ "eval_rmse": 0.37675654888153076,
512
+ "eval_runtime": 9.9551,
513
+ "eval_samples_per_second": 100.451,
514
+ "eval_steps_per_second": 1.607,
515
+ "step": 1335
516
+ },
517
+ {
518
+ "epoch": 15.168539325842696,
519
+ "grad_norm": 438157.09375,
520
+ "learning_rate": 1.4052287581699347e-05,
521
+ "loss": 0.1398,
522
+ "step": 1350
523
+ },
524
+ {
525
+ "epoch": 15.44943820224719,
526
+ "grad_norm": 451428.15625,
527
+ "learning_rate": 1.323529411764706e-05,
528
+ "loss": 0.1497,
529
+ "step": 1375
530
+ },
531
+ {
532
+ "epoch": 15.730337078651685,
533
+ "grad_norm": 794129.4375,
534
+ "learning_rate": 1.2418300653594772e-05,
535
+ "loss": 0.1425,
536
+ "step": 1400
537
+ },
538
+ {
539
+ "epoch": 16.0,
540
+ "eval_loss": 0.1398855596780777,
541
+ "eval_rmse": 0.3740127682685852,
542
+ "eval_runtime": 9.9994,
543
+ "eval_samples_per_second": 100.006,
544
+ "eval_steps_per_second": 1.6,
545
+ "step": 1424
546
+ },
547
+ {
548
+ "epoch": 16.01123595505618,
549
+ "grad_norm": 12731.2216796875,
550
+ "learning_rate": 1.1601307189542485e-05,
551
+ "loss": 0.141,
552
+ "step": 1425
553
+ },
554
+ {
555
+ "epoch": 16.292134831460675,
556
+ "grad_norm": 21009.0859375,
557
+ "learning_rate": 1.0784313725490197e-05,
558
+ "loss": 0.1401,
559
+ "step": 1450
560
+ },
561
+ {
562
+ "epoch": 16.573033707865168,
563
+ "grad_norm": 1118692.875,
564
+ "learning_rate": 9.96732026143791e-06,
565
+ "loss": 0.1431,
566
+ "step": 1475
567
+ },
568
+ {
569
+ "epoch": 16.853932584269664,
570
+ "grad_norm": 259783.9375,
571
+ "learning_rate": 9.150326797385621e-06,
572
+ "loss": 0.1411,
573
+ "step": 1500
574
+ },
575
+ {
576
+ "epoch": 17.0,
577
+ "eval_loss": 0.14006976783275604,
578
+ "eval_rmse": 0.37425896525382996,
579
+ "eval_runtime": 9.6126,
580
+ "eval_samples_per_second": 104.03,
581
+ "eval_steps_per_second": 1.664,
582
+ "step": 1513
583
+ },
584
+ {
585
+ "epoch": 17.134831460674157,
586
+ "grad_norm": 471577.78125,
587
+ "learning_rate": 8.333333333333334e-06,
588
+ "loss": 0.1422,
589
+ "step": 1525
590
+ },
591
+ {
592
+ "epoch": 17.415730337078653,
593
+ "grad_norm": 234170.34375,
594
+ "learning_rate": 7.5163398692810456e-06,
595
+ "loss": 0.143,
596
+ "step": 1550
597
+ },
598
+ {
599
+ "epoch": 17.696629213483146,
600
+ "grad_norm": 31571.07421875,
601
+ "learning_rate": 6.699346405228758e-06,
602
+ "loss": 0.1396,
603
+ "step": 1575
604
+ },
605
+ {
606
+ "epoch": 17.97752808988764,
607
+ "grad_norm": 1643420.5,
608
+ "learning_rate": 5.882352941176471e-06,
609
+ "loss": 0.1401,
610
+ "step": 1600
611
+ },
612
+ {
613
+ "epoch": 18.0,
614
+ "eval_loss": 0.14078088104724884,
615
+ "eval_rmse": 0.3752078115940094,
616
+ "eval_runtime": 9.8278,
617
+ "eval_samples_per_second": 101.752,
618
+ "eval_steps_per_second": 1.628,
619
+ "step": 1602
620
+ },
621
+ {
622
+ "epoch": 18.258426966292134,
623
+ "grad_norm": 972625.9375,
624
+ "learning_rate": 5.065359477124184e-06,
625
+ "loss": 0.14,
626
+ "step": 1625
627
+ },
628
+ {
629
+ "epoch": 18.53932584269663,
630
+ "grad_norm": 504889.40625,
631
+ "learning_rate": 4.2483660130718954e-06,
632
+ "loss": 0.1384,
633
+ "step": 1650
634
+ },
635
+ {
636
+ "epoch": 18.820224719101123,
637
+ "grad_norm": 231932.234375,
638
+ "learning_rate": 3.431372549019608e-06,
639
+ "loss": 0.1426,
640
+ "step": 1675
641
+ },
642
+ {
643
+ "epoch": 19.0,
644
+ "eval_loss": 0.14059938490390778,
645
+ "eval_rmse": 0.3749658763408661,
646
+ "eval_runtime": 9.7603,
647
+ "eval_samples_per_second": 102.456,
648
+ "eval_steps_per_second": 1.639,
649
+ "step": 1691
650
+ },
651
+ {
652
+ "epoch": 19.10112359550562,
653
+ "grad_norm": 26270.4609375,
654
+ "learning_rate": 2.6143790849673204e-06,
655
+ "loss": 0.1407,
656
+ "step": 1700
657
+ },
658
+ {
659
+ "epoch": 19.382022471910112,
660
+ "grad_norm": 157021.171875,
661
+ "learning_rate": 1.7973856209150326e-06,
662
+ "loss": 0.1403,
663
+ "step": 1725
664
+ },
665
+ {
666
+ "epoch": 19.662921348314608,
667
+ "grad_norm": 225804.71875,
668
+ "learning_rate": 9.80392156862745e-07,
669
+ "loss": 0.1402,
670
+ "step": 1750
671
+ },
672
+ {
673
+ "epoch": 19.9438202247191,
674
+ "grad_norm": 273644.4375,
675
+ "learning_rate": 1.6339869281045752e-07,
676
+ "loss": 0.1388,
677
+ "step": 1775
678
+ },
679
+ {
680
+ "epoch": 20.0,
681
+ "eval_loss": 0.13999705016613007,
682
+ "eval_rmse": 0.3741617798805237,
683
+ "eval_runtime": 9.7293,
684
+ "eval_samples_per_second": 102.782,
685
+ "eval_steps_per_second": 1.645,
686
+ "step": 1780
687
+ },
688
+ {
689
+ "epoch": 20.0,
690
+ "step": 1780,
691
+ "total_flos": 0.0,
692
+ "train_loss": 0.15538578709859527,
693
+ "train_runtime": 3316.8972,
694
+ "train_samples_per_second": 34.14,
695
+ "train_steps_per_second": 0.537
696
+ }
697
+ ],
698
+ "logging_steps": 25,
699
+ "max_steps": 1780,
700
+ "num_input_tokens_seen": 0,
701
+ "num_train_epochs": 20,
702
+ "save_steps": 500,
703
+ "stateful_callbacks": {
704
+ "TrainerControl": {
705
+ "args": {
706
+ "should_epoch_stop": false,
707
+ "should_evaluate": false,
708
+ "should_log": false,
709
+ "should_save": true,
710
+ "should_training_stop": true
711
+ },
712
+ "attributes": {}
713
+ }
714
+ },
715
+ "total_flos": 0.0,
716
+ "train_batch_size": 64,
717
+ "trial_name": null,
718
+ "trial_params": null
719
+ }
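
`trainer_state.json` carries the same per-epoch history as the README table in machine-readable form. A sketch for extracting the evaluation curve from a local copy of the file:

```python
import json

with open("trainer_state.json") as f:  # local copy of the file committed above
    state = json.load(f)

# Keep only the per-epoch evaluation entries from log_history.
evals = [e for e in state["log_history"] if "eval_rmse" in e]
for e in evals:
    print(f'epoch {e["epoch"]:5.1f}  eval_rmse {e["eval_rmse"]:.4f}')

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])
```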
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:073320d8a8b8f606fc2a6ce5df72ddd320a2b60a75b5523491a1df7af08e4dca
+ size 5240
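
`training_args.bin` is the pickled `transformers.TrainingArguments` object written by the `Trainer`; it can be inspected with `torch.load`. A sketch, assuming a trusted local copy (it is a pickle, so `weights_only=False` is required and should only be used on files you trust):

```python
import torch

# Pickled TrainingArguments object, not a tensor file: weights_only must be False.
args = torch.load("training_args.bin", weights_only=False)

print(args.learning_rate, args.num_train_epochs, args.seed)   # expected 5e-05, 20.0, 2014
print(args.per_device_train_batch_size, args.warmup_steps)    # expected 64, 250 (single-GPU assumption)
print(args.lr_scheduler_type)                                  # expected linear
```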