jssky committed
Commit c6bc3d0 · verified · 1 Parent(s): d314599

Training in progress, step 20, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "k_proj",
-    "o_proj",
     "v_proj",
-    "down_proj",
     "gate_proj",
-    "q_proj",
-    "up_proj"
+    "down_proj",
+    "o_proj",
+    "up_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1231cf2320c3ddf0a48d00a2bd902d1c00f7e1dd9637ec965fa3cebb8f2429b6
+oid sha256:300bba3d6dd656ffbc637166ddc9582343511d1ed18f8daea01b247abefa1028
 size 70430032
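
Note: this file (and the binary files below) is tracked with Git LFS, so the commit only swaps the three-line pointer (version, oid, size), not the tensor data itself. A minimal sketch, assuming the repository was cloned without fetching LFS objects, for inspecting such a pointer:

```python
# Minimal sketch: parse a Git LFS pointer file into its key/value fields.
# The file path below is the one from this checkpoint; any pointer file works.
def read_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("last-checkpoint/adapter_model.safetensors")
print(pointer["oid"], pointer["size"])  # e.g. sha256:300bba3d... 70430032
```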
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fc4b994d0ca778039ab955f37020ccc28ccaff37cc722a8822ade922d25df1f0
+oid sha256:82ee47a80d61c28550d9cb135a674aed9b17488ca85967cdaeaff8c9c0b02673
 size 141053442
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1a0d4bd220fbf49b44423e5ea9fda4712c0b03ac8a92c54c4c5bf15afb5ba6d
+oid sha256:57123e5fa00d8bf64605cdfc92f79dcb4dbb3559bbbfb51b41fdb47d989f8872
 size 15984
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:edb6073028f61942e6821f0ab9162b891c86209f99a33c501339016044a9c441
+oid sha256:9b45835c83ef9c4c0a5bed75763b431cdc3a11ef9b94140d06591dfd611da7be
 size 15984
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6bc00ccf75b6762a7f099c9008454b44f1bc9472c3e149f6036b50a350283a44
+oid sha256:14a879c93e2f204b91b53424ff34985f8c3db252cab19bf904db86e98d16d901
 size 15984
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:90976ef44f5f57e43d0cf99fb2443d55184425ba2d98614513ee4d389a0137e7
+oid sha256:e1d3eb999a1ce2a1b384d8d1217cfb813c8f956b1658632c26da6584866764e0
 size 15984
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9cef9b14b6afee9b8093272b9d0d6f22386d39de0a387fc62a45799e13a42229
+oid sha256:2248e517b2313c40d9be69c5ef59cf308803578165c04b5597de02ed383bee4a
 size 15984
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0c2d126c6e18640cd77b8db057e63b9b9a5e43322ddd8c6f6562184b4988b02f
+oid sha256:5da2d4b911166dd1ebc15cef278dc1f7e2e363a073fc4d50308736b727522613
 size 15984
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4c771130a0df904bfee5496151a9d76e7c4d3cb0e42406269389a420e74870b1
+oid sha256:7103113f3895293b822b7def45fa6586fc05ebf64e849f28dc2477747824a24e
 size 15984
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f75e4a2caa94385e75fe51cf1899554bcfc4aa25fd8a73b6b7a7a1ccd46f737
+oid sha256:fb4133e5dca9b975a0a7d4ac8dcb721e563c26208d4c1c8b5604625ea18eaaf4
 size 15984
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eae4efc55c9429b91bdc581cf21d4d1e88799c56c267a0959f657aba014828bf
+oid sha256:90ba7ee426c710877af00b51544bc25223e2159a4009a97f5f8c523a61cf68ad
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.010666666666666666,
+  "epoch": 0.014222222222222223,
   "eval_steps": 5,
-  "global_step": 15,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -18,9 +18,9 @@
     {
       "epoch": 0.0007111111111111111,
       "eval_loss": 4.628085136413574,
-      "eval_runtime": 44.9571,
-      "eval_samples_per_second": 52.695,
-      "eval_steps_per_second": 6.606,
+      "eval_runtime": 44.4506,
+      "eval_samples_per_second": 53.295,
+      "eval_steps_per_second": 6.682,
       "step": 1
     },
     {
@@ -39,111 +39,154 @@
     },
     {
       "epoch": 0.0028444444444444446,
-      "grad_norm": 10.534650802612305,
+      "grad_norm": 10.709222793579102,
       "learning_rate": 2e-05,
       "loss": 4.8927,
       "step": 4
     },
     {
       "epoch": 0.0035555555555555557,
-      "grad_norm": 6.7862019538879395,
+      "grad_norm": 6.892472267150879,
       "learning_rate": 4e-05,
       "loss": 3.526,
       "step": 5
     },
     {
       "epoch": 0.0035555555555555557,
-      "eval_loss": 4.499617576599121,
-      "eval_runtime": 39.947,
-      "eval_samples_per_second": 59.304,
-      "eval_steps_per_second": 7.435,
+      "eval_loss": 4.497377395629883,
+      "eval_runtime": 39.8243,
+      "eval_samples_per_second": 59.486,
+      "eval_steps_per_second": 7.458,
       "step": 5
     },
     {
       "epoch": 0.004266666666666667,
-      "grad_norm": 7.032112121582031,
+      "grad_norm": 7.115167617797852,
       "learning_rate": 6e-05,
-      "loss": 3.8045,
+      "loss": 3.8025,
       "step": 6
     },
     {
       "epoch": 0.004977777777777778,
-      "grad_norm": 6.311283111572266,
+      "grad_norm": 6.403872489929199,
       "learning_rate": 8e-05,
-      "loss": 4.0029,
+      "loss": 3.9999,
       "step": 7
     },
     {
       "epoch": 0.005688888888888889,
-      "grad_norm": 5.887501239776611,
+      "grad_norm": 5.959023952484131,
       "learning_rate": 0.0001,
-      "loss": 3.8592,
+      "loss": 3.8508,
       "step": 8
     },
     {
       "epoch": 0.0064,
-      "grad_norm": 4.442018508911133,
+      "grad_norm": 4.489181041717529,
       "learning_rate": 0.00012,
-      "loss": 3.252,
+      "loss": 3.2468,
       "step": 9
     },
     {
       "epoch": 0.0071111111111111115,
-      "grad_norm": 4.024985313415527,
+      "grad_norm": 4.083322525024414,
       "learning_rate": 0.00014,
-      "loss": 3.4748,
+      "loss": 3.4724,
       "step": 10
     },
     {
       "epoch": 0.0071111111111111115,
-      "eval_loss": 3.1893694400787354,
-      "eval_runtime": 37.4378,
-      "eval_samples_per_second": 63.278,
-      "eval_steps_per_second": 7.933,
+      "eval_loss": 3.18164324760437,
+      "eval_runtime": 37.2748,
+      "eval_samples_per_second": 63.555,
+      "eval_steps_per_second": 7.968,
       "step": 10
     },
     {
       "epoch": 0.007822222222222222,
-      "grad_norm": 3.4165191650390625,
+      "grad_norm": 3.44504714012146,
       "learning_rate": 0.00016,
-      "loss": 2.8739,
+      "loss": 2.8699,
       "step": 11
     },
     {
       "epoch": 0.008533333333333334,
-      "grad_norm": 3.099381446838379,
+      "grad_norm": 3.153762102127075,
       "learning_rate": 0.00018,
-      "loss": 2.9354,
+      "loss": 2.9342,
       "step": 12
     },
     {
       "epoch": 0.009244444444444444,
-      "grad_norm": 2.8003978729248047,
+      "grad_norm": 2.8073348999023438,
       "learning_rate": 0.0002,
-      "loss": 2.7117,
+      "loss": 2.705,
       "step": 13
     },
     {
       "epoch": 0.009955555555555556,
-      "grad_norm": 3.4195311069488525,
+      "grad_norm": 3.4868123531341553,
       "learning_rate": 0.00019510565162951537,
-      "loss": 2.9435,
+      "loss": 2.9463,
       "step": 14
     },
     {
       "epoch": 0.010666666666666666,
-      "grad_norm": 4.078439712524414,
+      "grad_norm": 4.070212364196777,
       "learning_rate": 0.00018090169943749476,
-      "loss": 2.9743,
+      "loss": 2.9741,
       "step": 15
     },
     {
       "epoch": 0.010666666666666666,
-      "eval_loss": 2.8235130310058594,
-      "eval_runtime": 37.5377,
-      "eval_samples_per_second": 63.11,
-      "eval_steps_per_second": 7.912,
+      "eval_loss": 2.8233630657196045,
+      "eval_runtime": 37.3991,
+      "eval_samples_per_second": 63.344,
+      "eval_steps_per_second": 7.941,
       "step": 15
+    },
+    {
+      "epoch": 0.011377777777777778,
+      "grad_norm": 3.4135398864746094,
+      "learning_rate": 0.00015877852522924732,
+      "loss": 2.9531,
+      "step": 16
+    },
+    {
+      "epoch": 0.012088888888888889,
+      "grad_norm": 3.347476005554199,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 2.9862,
+      "step": 17
+    },
+    {
+      "epoch": 0.0128,
+      "grad_norm": 3.600923538208008,
+      "learning_rate": 0.0001,
+      "loss": 2.8065,
+      "step": 18
+    },
+    {
+      "epoch": 0.013511111111111111,
+      "grad_norm": 3.2915897369384766,
+      "learning_rate": 6.909830056250527e-05,
+      "loss": 2.9556,
+      "step": 19
+    },
+    {
+      "epoch": 0.014222222222222223,
+      "grad_norm": 3.3658218383789062,
+      "learning_rate": 4.12214747707527e-05,
+      "loss": 2.9325,
+      "step": 20
+    },
+    {
+      "epoch": 0.014222222222222223,
+      "eval_loss": 2.736981153488159,
+      "eval_runtime": 39.7772,
+      "eval_samples_per_second": 59.557,
+      "eval_steps_per_second": 7.467,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -158,12 +201,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
      "attributes": {}
     }
   },
-  "total_flos": 4429513689661440.0,
+  "total_flos": 5906018252881920.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8141418bd3841ca1203dbe43091f9f4c2e7bd8a0fcec4b0acaf1e81043f1ff89
+oid sha256:86fc0fae7af6c964d28cbdfdf0241b208b3b6f862c8f1df4ccaad6f6a5f33100
 size 6712