lillian039 committed
Commit: 46b3374
1 Parent(s): 1e57a6e

Model save
README.md
ADDED
@@ -0,0 +1,66 @@
---
library_name: transformers
license: llama3.1
base_model: meta-llama/Meta-Llama-3.1-8B-Instruct
tags:
- trl
- sft
- generated_from_trainer
model-index:
- name: barc-llama3.1-8b-instruct-fft-transduction-gpt4omini10k_lr1e-5_epoch1__
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# barc-llama3.1-8b-instruct-fft-transduction-gpt4omini10k_lr1e-5_epoch1__

This model is a fine-tuned version of [meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0932

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 8
- eval_batch_size: 4
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 2
- total_train_batch_size: 128
- total_eval_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.1306        | 1.0   | 76   | 0.0932          |

### Framework versions

- Transformers 4.45.0.dev0
- Pytorch 2.4.1+cu121
- Datasets 3.0.0
- Tokenizers 0.19.1
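The hyperparameters above map directly onto a TRL SFT run. A minimal sketch is below; the dataset id, text column, and bf16 choice are assumptions for illustration and are not recorded in this commit.

```python
# Hedged sketch: reproduces the hyperparameters listed above with TRL's SFTTrainer.
# Dataset id, dataset_text_field, and bf16 are placeholders/assumptions, not taken from this repo.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import SFTConfig, SFTTrainer

base = "meta-llama/Meta-Llama-3.1-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

config = SFTConfig(
    output_dir="barc-llama3.1-8b-instruct-fft-transduction-gpt4omini10k_lr1e-5_epoch1__",
    learning_rate=1e-5,
    per_device_train_batch_size=8,   # x 8 GPUs x 2 accumulation steps = 128 effective
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=2,
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    dataset_text_field="text",       # placeholder column name
    bf16=True,                       # assumption; precision is not listed in the card
)

trainer = SFTTrainer(
    model=model,
    args=config,
    train_dataset=load_dataset("your-org/your-sft-dataset", split="train"),  # placeholder dataset
    tokenizer=tokenizer,
)
trainer.train()
```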
all_results.json
ADDED
@@ -0,0 +1,9 @@
{
    "epoch": 1.0,
    "total_flos": 2.244031216091136e+16,
    "train_loss": 0.12603967764267796,
    "train_runtime": 1049.273,
    "train_samples": 9698,
    "train_samples_per_second": 9.243,
    "train_steps_per_second": 0.072
}
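These numbers are internally consistent with the hyperparameters in the README; a quick arithmetic check (no repo code assumed):

```python
import math

train_samples = 9698
per_device_batch, num_devices, grad_accum = 8, 8, 2

effective_batch = per_device_batch * num_devices * grad_accum        # 128, matches total_train_batch_size
steps_per_epoch = math.ceil(train_samples / effective_batch)         # 76, matches global_step / max_steps
runtime = 1049.273

print(effective_batch, steps_per_epoch)
print(round(train_samples / runtime, 3))   # ~9.243 train_samples_per_second
print(round(steps_per_epoch / runtime, 3)) # ~0.072 train_steps_per_second
```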
generation_config.json
ADDED
@@ -0,0 +1,12 @@
{
  "bos_token_id": 128000,
  "do_sample": true,
  "eos_token_id": [
    128001,
    128008,
    128009
  ],
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "4.45.0.dev0"
}
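With these defaults baked into generation_config.json, `model.generate()` samples with temperature 0.6 and top-p 0.9 out of the box. A minimal usage sketch follows; the Hub repo id is assumed from the commit author and model name and may differ.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed Hub id for this fine-tune; substitute the actual repo path if it differs.
repo = "lillian039/barc-llama3.1-8b-instruct-fft-transduction-gpt4omini10k_lr1e-5_epoch1__"

tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "Describe the transformation rule for this grid puzzle."}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)

# do_sample, temperature, top_p, and eos_token_id are picked up from generation_config.json by default.
output = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```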
runs/Sep25_15-48-00_instance-20240925-140811/events.out.tfevents.1727279394.instance-20240925-140811.26163.0
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d6455486165ae36c873a7077bd5be5b43db675bf618d5c38c7de1e64854a6b6a
+size 22418
train_results.json
ADDED
@@ -0,0 +1,9 @@
{
    "epoch": 1.0,
    "total_flos": 2.244031216091136e+16,
    "train_loss": 0.12603967764267796,
    "train_runtime": 1049.273,
    "train_samples": 9698,
    "train_samples_per_second": 9.243,
    "train_steps_per_second": 0.072
}
trainer_state.json
ADDED
@@ -0,0 +1,582 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 76,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.013157894736842105, "grad_norm": 13.238144898761828, "learning_rate": 1.25e-06, "loss": 0.3431, "step": 1},
    {"epoch": 0.02631578947368421, "grad_norm": 13.138533030251612, "learning_rate": 2.5e-06, "loss": 0.3429, "step": 2},
    {"epoch": 0.039473684210526314, "grad_norm": 11.211925298543088, "learning_rate": 3.7500000000000005e-06, "loss": 0.2715, "step": 3},
    {"epoch": 0.05263157894736842, "grad_norm": 9.07840716756589, "learning_rate": 5e-06, "loss": 0.2923, "step": 4},
    {"epoch": 0.06578947368421052, "grad_norm": 4.255454799591527, "learning_rate": 6.25e-06, "loss": 0.1766, "step": 5},
    {"epoch": 0.07894736842105263, "grad_norm": 4.899723911649479, "learning_rate": 7.500000000000001e-06, "loss": 0.1795, "step": 6},
    {"epoch": 0.09210526315789473, "grad_norm": 21.444343227297015, "learning_rate": 8.750000000000001e-06, "loss": 0.2065, "step": 7},
    {"epoch": 0.10526315789473684, "grad_norm": 4.9912226694910995, "learning_rate": 1e-05, "loss": 0.1955, "step": 8},
    {"epoch": 0.11842105263157894, "grad_norm": 6.015085827837624, "learning_rate": 9.994664874011864e-06, "loss": 0.2171, "step": 9},
    {"epoch": 0.13157894736842105, "grad_norm": 6.232109397110641, "learning_rate": 9.978670881475173e-06, "loss": 0.1812, "step": 10},
    {"epoch": 0.14473684210526316, "grad_norm": 13.260525050150829, "learning_rate": 9.952052154376027e-06, "loss": 0.2299, "step": 11},
    {"epoch": 0.15789473684210525, "grad_norm": 2.8474419609274584, "learning_rate": 9.91486549841951e-06, "loss": 0.1729, "step": 12},
    {"epoch": 0.17105263157894737, "grad_norm": 4.441367319332706, "learning_rate": 9.867190271803466e-06, "loss": 0.1394, "step": 13},
    {"epoch": 0.18421052631578946, "grad_norm": 3.9791362793155245, "learning_rate": 9.809128215864096e-06, "loss": 0.1596, "step": 14},
    {"epoch": 0.19736842105263158, "grad_norm": 5.398799668373648, "learning_rate": 9.74080323795483e-06, "loss": 0.1883, "step": 15},
    {"epoch": 0.21052631578947367, "grad_norm": 3.801676563771218, "learning_rate": 9.66236114702178e-06, "loss": 0.1542, "step": 16},
    {"epoch": 0.2236842105263158, "grad_norm": 1.925322887230293, "learning_rate": 9.573969342440107e-06, "loss": 0.1535, "step": 17},
    {"epoch": 0.23684210526315788, "grad_norm": 2.4885347468657666, "learning_rate": 9.475816456775313e-06, "loss": 0.1495, "step": 18},
    {"epoch": 0.25, "grad_norm": 1.7097680648804299, "learning_rate": 9.368111953231849e-06, "loss": 0.0999, "step": 19},
    {"epoch": 0.2631578947368421, "grad_norm": 1.7070432708248722, "learning_rate": 9.251085678648072e-06, "loss": 0.1556, "step": 20},
    {"epoch": 0.27631578947368424, "grad_norm": 2.2017885079132475, "learning_rate": 9.124987372991512e-06, "loss": 0.1323, "step": 21},
    {"epoch": 0.2894736842105263, "grad_norm": 2.2923056410651657, "learning_rate": 8.990086136401199e-06, "loss": 0.1442, "step": 22},
    {"epoch": 0.3026315789473684, "grad_norm": 1.3443143761442609, "learning_rate": 8.846669854914395e-06, "loss": 0.1067, "step": 23},
    {"epoch": 0.3157894736842105, "grad_norm": 2.499856741628817, "learning_rate": 8.695044586103297e-06, "loss": 0.1428, "step": 24},
    {"epoch": 0.32894736842105265, "grad_norm": 1.6830200645247368, "learning_rate": 8.535533905932739e-06, "loss": 0.1201, "step": 25},
    {"epoch": 0.34210526315789475, "grad_norm": 1.5831400874663497, "learning_rate": 8.368478218232787e-06, "loss": 0.114, "step": 26},
    {"epoch": 0.35526315789473684, "grad_norm": 1.6535836650289428, "learning_rate": 8.194234028259806e-06, "loss": 0.1209, "step": 27},
    {"epoch": 0.3684210526315789, "grad_norm": 1.4967093603851553, "learning_rate": 8.013173181896283e-06, "loss": 0.0847, "step": 28},
    {"epoch": 0.3815789473684211, "grad_norm": 1.6642707923715867, "learning_rate": 7.82568207211296e-06, "loss": 0.1259, "step": 29},
    {"epoch": 0.39473684210526316, "grad_norm": 1.1497880560263245, "learning_rate": 7.63216081438678e-06, "loss": 0.1029, "step": 30},
    {"epoch": 0.40789473684210525, "grad_norm": 1.3303308469541215, "learning_rate": 7.4330223928342814e-06, "loss": 0.1004, "step": 31},
    {"epoch": 0.42105263157894735, "grad_norm": 2.427667338258928, "learning_rate": 7.2286917788826926e-06, "loss": 0.0991, "step": 32},
    {"epoch": 0.4342105263157895, "grad_norm": 1.4112715962724514, "learning_rate": 7.019605024359475e-06, "loss": 0.1117, "step": 33},
    {"epoch": 0.4473684210526316, "grad_norm": 1.0945264209924628, "learning_rate": 6.806208330935766e-06, "loss": 0.0974, "step": 34},
    {"epoch": 0.4605263157894737, "grad_norm": 2.222867676262349, "learning_rate": 6.588957097909509e-06, "loss": 0.1114, "step": 35},
    {"epoch": 0.47368421052631576, "grad_norm": 1.445820032177189, "learning_rate": 6.368314950360416e-06, "loss": 0.1082, "step": 36},
    {"epoch": 0.4868421052631579, "grad_norm": 2.005804185500154, "learning_rate": 6.144752749750671e-06, "loss": 0.0932, "step": 37},
    {"epoch": 0.5, "grad_norm": 1.4446629324877371, "learning_rate": 5.918747589082853e-06, "loss": 0.11, "step": 38},
    {"epoch": 0.5131578947368421, "grad_norm": 1.2013902671499292, "learning_rate": 5.690781774759412e-06, "loss": 0.088, "step": 39},
    {"epoch": 0.5263157894736842, "grad_norm": 0.9671394427016465, "learning_rate": 5.46134179731651e-06, "loss": 0.0965, "step": 40},
    {"epoch": 0.5394736842105263, "grad_norm": 0.874352355591093, "learning_rate": 5.230917293228699e-06, "loss": 0.0827, "step": 41},
    {"epoch": 0.5526315789473685, "grad_norm": 1.0990734862760956, "learning_rate": 5e-06, "loss": 0.1026, "step": 42},
    {"epoch": 0.5657894736842105, "grad_norm": 1.3629123541548118, "learning_rate": 4.7690827067713035e-06, "loss": 0.1379, "step": 43},
    {"epoch": 0.5789473684210527, "grad_norm": 1.1191667397706833, "learning_rate": 4.53865820268349e-06, "loss": 0.1006, "step": 44},
    {"epoch": 0.5921052631578947, "grad_norm": 1.1613132302632512, "learning_rate": 4.309218225240591e-06, "loss": 0.1196, "step": 45},
    {"epoch": 0.6052631578947368, "grad_norm": 1.4178964529072136, "learning_rate": 4.081252410917148e-06, "loss": 0.1107, "step": 46},
    {"epoch": 0.618421052631579, "grad_norm": 0.7977784874888711, "learning_rate": 3.855247250249331e-06, "loss": 0.0797, "step": 47},
    {"epoch": 0.631578947368421, "grad_norm": 1.0845045774450395, "learning_rate": 3.6316850496395863e-06, "loss": 0.1116, "step": 48},
    {"epoch": 0.6447368421052632, "grad_norm": 1.8478183045971928, "learning_rate": 3.4110429020904924e-06, "loss": 0.1146, "step": 49},
    {"epoch": 0.6578947368421053, "grad_norm": 0.7367795913317746, "learning_rate": 3.1937916690642356e-06, "loss": 0.0685, "step": 50},
    {"epoch": 0.6710526315789473, "grad_norm": 1.1081708398994705, "learning_rate": 2.980394975640526e-06, "loss": 0.0934, "step": 51},
    {"epoch": 0.6842105263157895, "grad_norm": 6.210726159509134, "learning_rate": 2.771308221117309e-06, "loss": 0.1012, "step": 52},
    {"epoch": 0.6973684210526315, "grad_norm": 0.987185487363248, "learning_rate": 2.5669776071657194e-06, "loss": 0.1173, "step": 53},
    {"epoch": 0.7105263157894737, "grad_norm": 1.0233786719046054, "learning_rate": 2.3678391856132203e-06, "loss": 0.1006, "step": 54},
    {"epoch": 0.7236842105263158, "grad_norm": 0.8805942584459667, "learning_rate": 2.174317927887041e-06, "loss": 0.0773, "step": 55},
    {"epoch": 0.7368421052631579, "grad_norm": 1.4063628412136746, "learning_rate": 1.9868268181037186e-06, "loss": 0.1285, "step": 56},
    {"epoch": 0.75, "grad_norm": 0.7609777001959152, "learning_rate": 1.8057659717401948e-06, "loss": 0.0827, "step": 57},
    {"epoch": 0.7631578947368421, "grad_norm": 0.699472385650843, "learning_rate": 1.6315217817672142e-06, "loss": 0.0728, "step": 58},
    {"epoch": 0.7763157894736842, "grad_norm": 0.8086483434606366, "learning_rate": 1.4644660940672628e-06, "loss": 0.0814, "step": 59},
    {"epoch": 0.7894736842105263, "grad_norm": 0.7120681165430872, "learning_rate": 1.3049554138967052e-06, "loss": 0.0746, "step": 60},
    {"epoch": 0.8026315789473685, "grad_norm": 0.6530934488185232, "learning_rate": 1.1533301450856054e-06, "loss": 0.0759, "step": 61},
    {"epoch": 0.8157894736842105, "grad_norm": 0.6010174144424382, "learning_rate": 1.0099138635988026e-06, "loss": 0.0851, "step": 62},
    {"epoch": 0.8289473684210527, "grad_norm": 0.7632251026621609, "learning_rate": 8.750126270084891e-07, "loss": 0.133, "step": 63},
    {"epoch": 0.8421052631578947, "grad_norm": 0.9433341471827504, "learning_rate": 7.489143213519301e-07, "loss": 0.0887, "step": 64},
    {"epoch": 0.8552631578947368, "grad_norm": 0.6626834135219665, "learning_rate": 6.318880467681527e-07, "loss": 0.0612, "step": 65},
    {"epoch": 0.868421052631579, "grad_norm": 0.6498581718557976, "learning_rate": 5.241835432246888e-07, "loss": 0.0807, "step": 66},
    {"epoch": 0.881578947368421, "grad_norm": 0.7771761015493923, "learning_rate": 4.2603065755989493e-07, "loss": 0.1067, "step": 67},
    {"epoch": 0.8947368421052632, "grad_norm": 0.7594547340399601, "learning_rate": 3.3763885297822153e-07, "loss": 0.0803, "step": 68},
    {"epoch": 0.9078947368421053, "grad_norm": 0.980352554631326, "learning_rate": 2.5919676204517073e-07, "loss": 0.0956, "step": 69},
    {"epoch": 0.9210526315789473, "grad_norm": 0.8650676858446684, "learning_rate": 1.908717841359048e-07, "loss": 0.0798, "step": 70},
    {"epoch": 0.9342105263157895, "grad_norm": 0.6440476763274703, "learning_rate": 1.328097281965357e-07, "loss": 0.0724, "step": 71},
    {"epoch": 0.9473684210526315, "grad_norm": 0.989220896843026, "learning_rate": 8.513450158049109e-08, "loss": 0.1021, "step": 72},
    {"epoch": 0.9605263157894737, "grad_norm": 0.6855314313706689, "learning_rate": 4.794784562397459e-08, "loss": 0.0656, "step": 73},
    {"epoch": 0.9736842105263158, "grad_norm": 0.8621391032787515, "learning_rate": 2.1329118524827662e-08, "loss": 0.0734, "step": 74},
    {"epoch": 0.9868421052631579, "grad_norm": 0.7312318211226774, "learning_rate": 5.3351259881379016e-09, "loss": 0.0711, "step": 75},
    {"epoch": 1.0, "grad_norm": 0.8656756829991957, "learning_rate": 0.0, "loss": 0.1306, "step": 76},
    {"epoch": 1.0, "eval_loss": 0.09319383651018143, "eval_runtime": 140.3661, "eval_samples_per_second": 36.369, "eval_steps_per_second": 1.14, "step": 76},
    {"epoch": 1.0, "step": 76, "total_flos": 2.244031216091136e+16, "train_loss": 0.12603967764267796, "train_runtime": 1049.273, "train_samples_per_second": 9.243, "train_steps_per_second": 0.072}
  ],
  "logging_steps": 1,
  "max_steps": 76,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.244031216091136e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
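The learning_rate trace in log_history is consistent with a linear warmup over ceil(0.1 × 76) = 8 steps followed by cosine decay to zero (the cosine schedule with warmup used by the Trainer). A quick check against a few logged values, a sketch rather than the training code:

```python
import math

base_lr, max_steps = 1e-5, 76
warmup = math.ceil(0.1 * max_steps)  # 8 warmup steps

def scheduled_lr(step):
    # Linear warmup to base_lr, then half-cosine decay to 0.
    if step < warmup:
        return base_lr * step / warmup
    progress = (step - warmup) / (max_steps - warmup)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

# Compare against values logged in trainer_state.json.
for step, logged in [(1, 1.25e-06), (8, 1e-05), (9, 9.994664874011864e-06),
                     (75, 5.3351259881379016e-09), (76, 0.0)]:
    print(step, scheduled_lr(step), logged)
```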