CoruNethron committed
Commit 3c2b56c · 1 Parent(s): df8881d

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - autotrain
+ - text-generation
+ widget:
+ - text: "I love AutoTrain because "
+ ---
+
+ # Model Trained Using AutoTrain
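
The card above only records that the model came from AutoTrain. As a minimal usage sketch (not part of this commit), the adapter can be applied to its base model with `peft`; the repo id `CoruNethron/neu-sai-fine-it1-msz4` is taken from `training_params.json` below, and the prompt is the widget text from the card.

```python
# Hedged sketch: load the base model named in adapter_config.json, apply
# this LoRA adapter on top of it, then run the card's widget prompt.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Intel/neural-chat-7b-v3-1")
model = PeftModel.from_pretrained(base, "CoruNethron/neu-sai-fine-it1-msz4")
tokenizer = AutoTokenizer.from_pretrained("CoruNethron/neu-sai-fine-it1-msz4")

inputs = tokenizer("I love AutoTrain because ", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```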
adapter_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "Intel/neural-chat-7b-v3-1",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj",
+     "k_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
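
The fields above map one-to-one onto `peft`'s `LoraConfig`; a hedged reconstruction of the config object that would serialize to this file (field names as in peft 0.5.x, the version the checkpoint README below reports):

```python
# Sketch: the LoraConfig corresponding to adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,              # "r": LoRA rank
    lora_alpha=32,     # scaling numerator; effective scale = alpha / r = 2
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],  # attention projections
    task_type="CAUSAL_LM",
)
```

With rank 16 over only the four attention projections of a 7B model, the adapter stays small, which matches the ~54.6 MB `adapter_model.bin` below.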
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef04c3727c11199ab341bb57910cbd50a61ec95a0117a88268911768f2579add
+ size 54618762
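
The three lines above are a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 of the real file and `size` its byte count. A small sketch (assumed usage, not part of the repo) for checking a downloaded copy against the pointer:

```python
# Verify a local adapter_model.bin against the LFS pointer's oid and size.
import hashlib
import os

path = "adapter_model.bin"  # hypothetical local path of the downloaded file
expected_oid = "ef04c3727c11199ab341bb57910cbd50a61ec95a0117a88268911768f2579add"
expected_size = 54618762

assert os.path.getsize(path) == expected_size, "size mismatch"
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "hash mismatch"
print("local file matches the LFS pointer")
```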
checkpoint-4122/README.md ADDED
@@ -0,0 +1,34 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+ - PEFT 0.5.0
+
+ - PEFT 0.5.0
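
The bullet list in the README above corresponds to `transformers`' `BitsAndBytesConfig` (the `bnb_4bit_*` entries are defaults that stay inactive under 8-bit loading). A hedged sketch of how the base model would be loaded with that config:

```python
# Sketch: 8-bit quantized loading matching the config listed in the README.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,               # load_in_8bit: True
    llm_int8_threshold=6.0,          # llm_int8_threshold: 6.0
    llm_int8_has_fp16_weight=False,  # llm_int8_has_fp16_weight: False
)

model = AutoModelForCausalLM.from_pretrained(
    "Intel/neural-chat-7b-v3-1",
    quantization_config=bnb_config,
)
```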
checkpoint-4122/adapter_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "Intel/neural-chat-7b-v3-1",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj",
+     "k_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4122/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef04c3727c11199ab341bb57910cbd50a61ec95a0117a88268911768f2579add
+ size 54618762
checkpoint-4122/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ece8c0ee1b72ebebf9f2e19e27fdcd5613f438c94de317cfd6ac0c2471dd69b
+ size 4072
checkpoint-4122/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d91923fbd95309beca79b3cc9cb6a9510e28d9b6600d0d57b5b45838d2169f26
+ size 14180
checkpoint-4122/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8d7cc5a88feb508f7970570ff47fdb4c737f262022a3bae1d220c4e6160b7d2
+ size 1064
checkpoint-4122/special_tokens_map.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-4122/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4122/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
checkpoint-4122/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
checkpoint-4122/trainer_state.json ADDED
@@ -0,0 +1,1003 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.3435,
+   "eval_steps": 500,
+   "global_step": 4122,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.01,
+       "learning_rate": 3.833333333333334e-06,
+       "loss": 1.5323,
+       "step": 25
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 8.000000000000001e-06,
+       "loss": 1.5524,
+       "step": 50
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 1.2166666666666668e-05,
+       "loss": 1.4547,
+       "step": 75
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 1.6333333333333335e-05,
+       "loss": 1.4912,
+       "step": 100
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 2.05e-05,
+       "loss": 1.4495,
+       "step": 125
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 2.466666666666667e-05,
+       "loss": 1.5684,
+       "step": 150
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 2.8833333333333334e-05,
+       "loss": 1.4968,
+       "step": 175
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 3.3e-05,
+       "loss": 1.502,
+       "step": 200
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 3.7166666666666664e-05,
+       "loss": 1.5562,
+       "step": 225
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 4.133333333333333e-05,
+       "loss": 1.4938,
+       "step": 250
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 4.55e-05,
+       "loss": 1.5588,
+       "step": 275
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 4.966666666666667e-05,
+       "loss": 1.4588,
+       "step": 300
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 5.383333333333334e-05,
+       "loss": 1.5069,
+       "step": 325
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 5.8e-05,
+       "loss": 1.4487,
+       "step": 350
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 6.216666666666667e-05,
+       "loss": 1.4181,
+       "step": 375
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 6.633333333333334e-05,
+       "loss": 1.4582,
+       "step": 400
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 7.033333333333334e-05,
+       "loss": 1.4703,
+       "step": 425
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 7.450000000000001e-05,
+       "loss": 1.4574,
+       "step": 450
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 7.866666666666666e-05,
+       "loss": 1.4634,
+       "step": 475
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 8.283333333333335e-05,
+       "loss": 1.4442,
+       "step": 500
+     },
+     {
+       "epoch": 0.13,
+       "learning_rate": 8.7e-05,
+       "loss": 1.4699,
+       "step": 525
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 9.116666666666667e-05,
+       "loss": 1.3843,
+       "step": 550
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 9.533333333333334e-05,
+       "loss": 1.3936,
+       "step": 575
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 9.95e-05,
+       "loss": 1.4198,
+       "step": 600
+     },
+     {
+       "epoch": 0.16,
+       "learning_rate": 0.00010366666666666666,
+       "loss": 1.408,
+       "step": 625
+     },
+     {
+       "epoch": 0.16,
+       "learning_rate": 0.00010783333333333334,
+       "loss": 1.3788,
+       "step": 650
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 0.00011200000000000001,
+       "loss": 1.4037,
+       "step": 675
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 0.00011616666666666667,
+       "loss": 1.374,
+       "step": 700
+     },
+     {
+       "epoch": 0.18,
+       "learning_rate": 0.00012033333333333335,
+       "loss": 1.3996,
+       "step": 725
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 0.00012450000000000002,
+       "loss": 1.3591,
+       "step": 750
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 0.00012866666666666666,
+       "loss": 1.3733,
+       "step": 775
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 0.00013283333333333335,
+       "loss": 1.3037,
+       "step": 800
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 0.00013700000000000002,
+       "loss": 1.3285,
+       "step": 825
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 0.00014116666666666666,
+       "loss": 1.3496,
+       "step": 850
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 0.00014533333333333333,
+       "loss": 1.3337,
+       "step": 875
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 0.00014950000000000003,
+       "loss": 1.3605,
+       "step": 900
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 0.00015366666666666667,
+       "loss": 1.302,
+       "step": 925
+     },
+     {
+       "epoch": 0.24,
+       "learning_rate": 0.00015783333333333334,
+       "loss": 1.3118,
+       "step": 950
+     },
+     {
+       "epoch": 0.24,
+       "learning_rate": 0.000162,
+       "loss": 1.3104,
+       "step": 975
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 0.00016616666666666668,
+       "loss": 1.2881,
+       "step": 1000
+     },
+     {
+       "epoch": 0.26,
+       "learning_rate": 0.00017033333333333334,
+       "loss": 1.2606,
+       "step": 1025
+     },
+     {
+       "epoch": 0.26,
+       "learning_rate": 0.0001745,
+       "loss": 1.2912,
+       "step": 1050
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 0.00017866666666666668,
+       "loss": 1.2651,
+       "step": 1075
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 0.00018283333333333335,
+       "loss": 1.3028,
+       "step": 1100
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 0.00018700000000000002,
+       "loss": 1.2867,
+       "step": 1125
+     },
+     {
+       "epoch": 0.29,
+       "learning_rate": 0.00019116666666666666,
+       "loss": 1.2531,
+       "step": 1150
+     },
+     {
+       "epoch": 0.29,
+       "learning_rate": 0.00019533333333333336,
+       "loss": 1.2456,
+       "step": 1175
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 0.00019950000000000002,
+       "loss": 1.2449,
+       "step": 1200
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 0.00019999795230099993,
+       "loss": 1.2648,
+       "step": 1225
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 0.00019999065431390242,
+       "loss": 1.2348,
+       "step": 1250
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 0.00019997806834748456,
+       "loss": 1.2385,
+       "step": 1275
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 0.00019996019506735184,
+       "loss": 1.2761,
+       "step": 1300
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 0.00019993703541872795,
+       "loss": 1.2532,
+       "step": 1325
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 0.00019990859062640477,
+       "loss": 1.2478,
+       "step": 1350
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.00019987486219467764,
+       "loss": 1.2108,
+       "step": 1375
+     },
+     {
+       "epoch": 1.01,
+       "learning_rate": 0.0001998358519072658,
+       "loss": 1.209,
+       "step": 1400
+     },
+     {
+       "epoch": 1.01,
+       "learning_rate": 0.0001997915618272179,
+       "loss": 1.237,
+       "step": 1425
+     },
+     {
+       "epoch": 1.02,
+       "learning_rate": 0.0001997419942968032,
+       "loss": 1.298,
+       "step": 1450
+     },
+     {
+       "epoch": 1.03,
+       "learning_rate": 0.00019968715193738738,
+       "loss": 1.1897,
+       "step": 1475
+     },
+     {
+       "epoch": 1.03,
+       "learning_rate": 0.0001996295434028785,
+       "loss": 1.1999,
+       "step": 1500
+     },
+     {
+       "epoch": 1.04,
+       "learning_rate": 0.00019956437105068297,
+       "loss": 1.1667,
+       "step": 1525
+     },
+     {
+       "epoch": 1.04,
+       "learning_rate": 0.00019949393326304478,
+       "loss": 1.2099,
+       "step": 1550
+     },
+     {
+       "epoch": 1.05,
+       "learning_rate": 0.0001994182337650477,
+       "loss": 1.21,
+       "step": 1575
+     },
+     {
+       "epoch": 1.06,
+       "learning_rate": 0.00019933727656003963,
+       "loss": 1.1651,
+       "step": 1600
+     },
+     {
+       "epoch": 1.06,
+       "learning_rate": 0.00019925106592942103,
+       "loss": 1.1462,
+       "step": 1625
+     },
+     {
+       "epoch": 1.07,
+       "learning_rate": 0.00019915960643241825,
+       "loss": 1.1849,
+       "step": 1650
+     },
+     {
+       "epoch": 1.08,
+       "learning_rate": 0.00019906290290584258,
+       "loss": 1.1645,
+       "step": 1675
+     },
+     {
+       "epoch": 1.08,
+       "learning_rate": 0.00019896096046383456,
+       "loss": 1.1824,
+       "step": 1700
+     },
+     {
+       "epoch": 1.09,
+       "learning_rate": 0.00019885378449759316,
+       "loss": 1.15,
+       "step": 1725
+     },
+     {
+       "epoch": 1.09,
+       "learning_rate": 0.0001987459771291654,
+       "loss": 1.2295,
+       "step": 1750
+     },
+     {
+       "epoch": 1.1,
+       "learning_rate": 0.00019862856015372317,
+       "loss": 1.15,
+       "step": 1775
+     },
+     {
+       "epoch": 1.11,
+       "learning_rate": 0.0001985059272329501,
+       "loss": 1.1641,
+       "step": 1800
+     },
+     {
+       "epoch": 1.11,
+       "learning_rate": 0.0001983780848522559,
+       "loss": 1.2108,
+       "step": 1825
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 0.00019824503977255097,
+       "loss": 1.1669,
+       "step": 1850
+     },
+     {
+       "epoch": 1.13,
+       "learning_rate": 0.00019810679902988927,
+       "loss": 1.1874,
+       "step": 1875
+     },
+     {
+       "epoch": 1.13,
+       "learning_rate": 0.00019796336993509588,
+       "loss": 1.0834,
+       "step": 1900
+     },
+     {
+       "epoch": 1.14,
+       "learning_rate": 0.00019781476007338058,
+       "loss": 1.2298,
+       "step": 1925
+     },
+     {
+       "epoch": 1.14,
+       "learning_rate": 0.0001976609773039366,
+       "loss": 1.1745,
+       "step": 1950
+     },
+     {
+       "epoch": 1.15,
+       "learning_rate": 0.00019750202975952507,
+       "loss": 1.1523,
+       "step": 1975
+     },
+     {
+       "epoch": 1.16,
+       "learning_rate": 0.00019733792584604486,
+       "loss": 1.1434,
+       "step": 2000
+     },
+     {
+       "epoch": 1.16,
+       "learning_rate": 0.00019716867424208806,
+       "loss": 1.1493,
+       "step": 2025
+     },
+     {
+       "epoch": 1.17,
+       "learning_rate": 0.00019699428389848093,
+       "loss": 1.1317,
+       "step": 2050
+     },
+     {
+       "epoch": 1.18,
+       "learning_rate": 0.0001968147640378108,
+       "loss": 1.1427,
+       "step": 2075
+     },
+     {
+       "epoch": 1.18,
+       "learning_rate": 0.00019663760793213296,
+       "loss": 1.1638,
+       "step": 2100
+     },
+     {
+       "epoch": 1.19,
+       "learning_rate": 0.000196448062009159,
+       "loss": 1.1219,
+       "step": 2125
+     },
+     {
+       "epoch": 1.19,
+       "learning_rate": 0.00019625341545592226,
+       "loss": 1.1685,
+       "step": 2150
+     },
+     {
+       "epoch": 1.2,
+       "learning_rate": 0.0001960536785662542,
+       "loss": 1.1434,
+       "step": 2175
+     },
+     {
+       "epoch": 1.21,
+       "learning_rate": 0.00019584886190318742,
+       "loss": 1.098,
+       "step": 2200
+     },
+     {
+       "epoch": 1.21,
+       "learning_rate": 0.000195638976298397,
+       "loss": 1.1473,
+       "step": 2225
+     },
+     {
+       "epoch": 1.22,
+       "learning_rate": 0.0001954240328516277,
+       "loss": 1.1443,
+       "step": 2250
+     },
+     {
+       "epoch": 1.23,
+       "learning_rate": 0.00019520404293010683,
+       "loss": 1.137,
+       "step": 2275
+     },
+     {
+       "epoch": 1.23,
+       "learning_rate": 0.0001949790181679433,
+       "loss": 1.1211,
+       "step": 2300
+     },
+     {
+       "epoch": 1.24,
+       "learning_rate": 0.00019474897046551208,
+       "loss": 1.0878,
+       "step": 2325
+     },
+     {
+       "epoch": 1.24,
+       "learning_rate": 0.00019451391198882517,
+       "loss": 1.1113,
+       "step": 2350
+     },
+     {
+       "epoch": 1.25,
+       "learning_rate": 0.00019427385516888798,
+       "loss": 1.1411,
+       "step": 2375
+     },
+     {
+       "epoch": 1.26,
+       "learning_rate": 0.0001940288127010419,
+       "loss": 1.0948,
+       "step": 2400
+     },
+     {
+       "epoch": 1.26,
+       "learning_rate": 0.0001937787975442931,
+       "loss": 1.0966,
+       "step": 2425
+     },
+     {
+       "epoch": 1.27,
+       "learning_rate": 0.0001935238229206271,
+       "loss": 1.1556,
+       "step": 2450
+     },
+     {
+       "epoch": 1.28,
+       "learning_rate": 0.00019326390231430942,
+       "loss": 1.1422,
+       "step": 2475
+     },
+     {
+       "epoch": 1.28,
+       "learning_rate": 0.0001929990494711726,
+       "loss": 1.1278,
+       "step": 2500
+     },
+     {
+       "epoch": 1.29,
+       "learning_rate": 0.00019272927839788929,
+       "loss": 1.0958,
+       "step": 2525
+     },
+     {
+       "epoch": 1.29,
+       "learning_rate": 0.00019245460336123134,
+       "loss": 1.1172,
+       "step": 2550
+     },
+     {
+       "epoch": 1.3,
+       "learning_rate": 0.00019217503888731546,
+       "loss": 1.1393,
+       "step": 2575
+     },
+     {
+       "epoch": 1.31,
+       "learning_rate": 0.00019189059976083492,
+       "loss": 1.1054,
+       "step": 2600
+     },
+     {
+       "epoch": 1.31,
+       "learning_rate": 0.00019160130102427778,
+       "loss": 1.1073,
+       "step": 2625
+     },
+     {
+       "epoch": 1.32,
+       "learning_rate": 0.00019130715797713123,
+       "loss": 1.1802,
+       "step": 2650
+     },
+     {
+       "epoch": 1.33,
+       "learning_rate": 0.0001910081861750726,
+       "loss": 1.1162,
+       "step": 2675
+     },
+     {
+       "epoch": 1.33,
+       "learning_rate": 0.0001907044014291465,
+       "loss": 1.1289,
+       "step": 2700
+     },
+     {
+       "epoch": 1.34,
+       "learning_rate": 0.00019039581980492902,
+       "loss": 1.0892,
+       "step": 2725
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 0.00019008245762167772,
+       "loss": 1.1055,
+       "step": 2750
+     },
+     {
+       "epoch": 2.01,
+       "learning_rate": 0.00018976433145146885,
+       "loss": 1.0787,
+       "step": 2775
+     },
+     {
+       "epoch": 2.01,
+       "learning_rate": 0.0001894414581183208,
+       "loss": 1.0862,
+       "step": 2800
+     },
+     {
+       "epoch": 2.02,
+       "learning_rate": 0.00018911385469730444,
+       "loss": 1.0763,
+       "step": 2825
+     },
+     {
+       "epoch": 2.03,
+       "learning_rate": 0.00018878153851364013,
+       "loss": 1.1034,
+       "step": 2850
+     },
+     {
+       "epoch": 2.03,
+       "learning_rate": 0.00018844452714178136,
+       "loss": 1.0982,
+       "step": 2875
+     },
+     {
+       "epoch": 2.04,
+       "learning_rate": 0.00018810283840448545,
+       "loss": 1.0915,
+       "step": 2900
+     },
+     {
+       "epoch": 2.04,
+       "learning_rate": 0.00018775649037187093,
+       "loss": 1.0945,
+       "step": 2925
+     },
+     {
+       "epoch": 2.05,
+       "learning_rate": 0.00018740550136046196,
+       "loss": 1.1061,
+       "step": 2950
+     },
+     {
+       "epoch": 2.06,
+       "learning_rate": 0.00018704988993221964,
+       "loss": 1.1178,
+       "step": 2975
+     },
+     {
+       "epoch": 2.06,
+       "learning_rate": 0.00018668967489356028,
+       "loss": 1.0524,
+       "step": 3000
+     },
+     {
+       "epoch": 2.07,
+       "learning_rate": 0.00018632487529436105,
+       "loss": 1.1349,
+       "step": 3025
+     },
+     {
+       "epoch": 2.08,
+       "learning_rate": 0.00018595551042695227,
+       "loss": 1.0893,
+       "step": 3050
+     },
+     {
+       "epoch": 2.08,
+       "learning_rate": 0.00018558159982509732,
+       "loss": 1.0633,
+       "step": 3075
+     },
+     {
+       "epoch": 2.09,
+       "learning_rate": 0.00018520316326295957,
+       "loss": 1.1066,
+       "step": 3100
+     },
+     {
+       "epoch": 2.09,
+       "learning_rate": 0.00018482022075405648,
+       "loss": 1.1457,
+       "step": 3125
+     },
+     {
+       "epoch": 2.1,
+       "learning_rate": 0.00018443279255020152,
+       "loss": 1.0521,
+       "step": 3150
+     },
+     {
+       "epoch": 2.11,
+       "learning_rate": 0.00018404089914043272,
+       "loss": 1.0651,
+       "step": 3175
+     },
+     {
+       "epoch": 2.11,
+       "learning_rate": 0.00018364456124992954,
+       "loss": 1.0912,
+       "step": 3200
+     },
+     {
+       "epoch": 2.12,
+       "learning_rate": 0.0001832437998389165,
+       "loss": 1.1053,
+       "step": 3225
+     },
+     {
+       "epoch": 2.13,
+       "learning_rate": 0.00018283863610155487,
+       "loss": 1.0853,
+       "step": 3250
+     },
+     {
+       "epoch": 2.13,
+       "learning_rate": 0.00018242909146482187,
+       "loss": 1.105,
+       "step": 3275
+     },
+     {
+       "epoch": 2.14,
+       "learning_rate": 0.00018201518758737724,
+       "loss": 1.0751,
+       "step": 3300
+     },
+     {
+       "epoch": 2.14,
+       "learning_rate": 0.00018159694635841819,
+       "loss": 1.1086,
+       "step": 3325
+     },
+     {
+       "epoch": 2.15,
+       "learning_rate": 0.0001811743898965215,
+       "loss": 1.0675,
+       "step": 3350
+     },
+     {
+       "epoch": 2.16,
+       "learning_rate": 0.000180747540548474,
+       "loss": 1.0697,
+       "step": 3375
+     },
+     {
+       "epoch": 2.16,
+       "learning_rate": 0.00018031642088809063,
+       "loss": 1.1664,
+       "step": 3400
+     },
+     {
+       "epoch": 2.17,
+       "learning_rate": 0.00017988105371502059,
+       "loss": 1.0677,
+       "step": 3425
+     },
+     {
+       "epoch": 2.18,
+       "learning_rate": 0.00017944146205354182,
+       "loss": 1.0478,
+       "step": 3450
+     },
+     {
+       "epoch": 2.18,
+       "learning_rate": 0.00017899766915134308,
+       "loss": 1.0764,
+       "step": 3475
+     },
+     {
+       "epoch": 2.19,
+       "learning_rate": 0.0001785496984782947,
+       "loss": 1.0744,
+       "step": 3500
+     },
+     {
+       "epoch": 2.19,
+       "learning_rate": 0.0001780975737252073,
+       "loss": 1.1379,
+       "step": 3525
+     },
+     {
+       "epoch": 2.2,
+       "learning_rate": 0.0001776413188025789,
+       "loss": 1.1323,
+       "step": 3550
+     },
+     {
+       "epoch": 2.21,
+       "learning_rate": 0.00017718095783933054,
+       "loss": 1.1317,
+       "step": 3575
+     },
+     {
+       "epoch": 2.21,
+       "learning_rate": 0.00017671651518153,
+       "loss": 1.0786,
+       "step": 3600
+     },
+     {
+       "epoch": 2.22,
+       "learning_rate": 0.00017624801539110443,
+       "loss": 1.0549,
+       "step": 3625
+     },
+     {
+       "epoch": 2.23,
+       "learning_rate": 0.00017577548324454147,
+       "loss": 1.1106,
+       "step": 3650
+     },
+     {
+       "epoch": 2.23,
+       "learning_rate": 0.00017529894373157875,
+       "loss": 1.0852,
+       "step": 3675
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 0.00017481842205388244,
+       "loss": 1.1003,
+       "step": 3700
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 0.0001743339436237144,
+       "loss": 1.0682,
+       "step": 3725
+     },
+     {
+       "epoch": 2.25,
+       "learning_rate": 0.00017384553406258842,
+       "loss": 1.0729,
+       "step": 3750
+     },
+     {
+       "epoch": 2.26,
+       "learning_rate": 0.0001733532191999149,
+       "loss": 1.0679,
+       "step": 3775
+     },
+     {
+       "epoch": 2.26,
+       "learning_rate": 0.00017285702507163534,
+       "loss": 1.028,
+       "step": 3800
+     },
+     {
+       "epoch": 2.27,
+       "learning_rate": 0.00017235697791884494,
+       "loss": 1.0769,
+       "step": 3825
+     },
+     {
+       "epoch": 2.28,
+       "learning_rate": 0.00017185310418640524,
+       "loss": 1.0521,
+       "step": 3850
+     },
+     {
+       "epoch": 2.28,
+       "learning_rate": 0.00017134543052154538,
+       "loss": 1.0768,
+       "step": 3875
+     },
+     {
+       "epoch": 2.29,
+       "learning_rate": 0.0001708339837724529,
+       "loss": 1.0611,
+       "step": 3900
+     },
+     {
+       "epoch": 2.29,
+       "learning_rate": 0.00017031879098685393,
+       "loss": 1.0694,
+       "step": 3925
+     },
+     {
+       "epoch": 2.3,
+       "learning_rate": 0.00016979987941058273,
+       "loss": 1.067,
+       "step": 3950
+     },
+     {
+       "epoch": 2.31,
+       "learning_rate": 0.00016927727648614085,
+       "loss": 1.0586,
+       "step": 3975
+     },
+     {
+       "epoch": 2.31,
+       "learning_rate": 0.0001687510098512458,
+       "loss": 1.0337,
+       "step": 4000
+     },
+     {
+       "epoch": 2.32,
+       "learning_rate": 0.0001682211073373694,
+       "loss": 1.0501,
+       "step": 4025
+     },
+     {
+       "epoch": 2.33,
+       "learning_rate": 0.00016768759696826608,
+       "loss": 1.0818,
+       "step": 4050
+     },
+     {
+       "epoch": 2.33,
+       "learning_rate": 0.00016715050695849067,
+       "loss": 1.0548,
+       "step": 4075
+     },
+     {
+       "epoch": 2.34,
+       "learning_rate": 0.00016660986571190626,
+       "loss": 1.0919,
+       "step": 4100
+     }
+   ],
+   "logging_steps": 25,
+   "max_steps": 12000,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 3.608534619248394e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
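
`trainer_state.json` records one `log_history` entry every 25 steps (`logging_steps`); over the 4,100 logged steps the loss falls from about 1.53 to about 1.09, while the learning rate warms up to roughly 2e-4 by step 1200 (warmup_ratio 0.1 of max_steps 12000, per `training_params.json` below) and then follows the cosine schedule down. A small sketch (assumed usage, not part of the commit) for summarizing the log:

```python
# Sketch: summarize the loss curve stored in the checkpoint's trainer state.
import json

with open("checkpoint-4122/trainer_state.json") as f:
    state = json.load(f)

points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
first_step, first_loss = points[0]
last_step, last_loss = points[-1]
print(f"{len(points)} logged points")
print(f"loss {first_loss:.4f} at step {first_step} -> {last_loss:.4f} at step {last_step}")
```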
checkpoint-4122/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41e972bf5b6ffc690e7c0a26398cdcf74137de64b9f31441a70c7befc7cc1e61
+ size 4472
special_tokens_map.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
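
Together, `tokenizer.model` (the SentencePiece vocabulary), `tokenizer.json`, `special_tokens_map.json`, and `tokenizer_config.json` above are what `AutoTokenizer` consumes. A hedged sketch reading them back from the repo id recorded in `training_params.json`:

```python
# Sketch: load the tokenizer shipped in this commit and echo a few of the
# settings visible in tokenizer_config.json above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("CoruNethron/neu-sai-fine-it1-msz4")
print(tokenizer.model_max_length)  # 2048
print(tokenizer.pad_token)         # </s> (EOS doubles as the padding token)
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)
```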
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41e972bf5b6ffc690e7c0a26398cdcf74137de64b9f31441a70c7befc7cc1e61
+ size 4472
training_params.json ADDED
@@ -0,0 +1 @@
+ {"model": "Intel/neural-chat-7b-v3-1", "data_path": ".", "project_name": "neu-sai-fine-msz4", "train_split": "train", "valid_split": null, "text_column": "text", "rejected_text_column": "rejected", "lr": 0.0002, "epochs": 3, "batch_size": 1, "warmup_ratio": 0.1, "gradient_accumulation": 1, "optimizer": "sgd", "scheduler": "cosine", "weight_decay": 0.0, "max_grad_norm": 1.0, "seed": 42, "add_eos_token": false, "block_size": 2048, "use_peft": true, "lora_r": 16, "lora_alpha": 32, "lora_dropout": 0.05, "logging_steps": 25, "evaluation_strategy": "epoch", "save_total_limit": 1, "save_strategy": "epoch", "auto_find_batch_size": false, "fp16": true, "push_to_hub": true, "use_int8": true, "model_max_length": 2048, "repo_id": "CoruNethron/neu-sai-fine-it1-msz4", "use_int4": false, "trainer": "sft", "target_modules": "q_proj,v_proj,k_proj,o_proj", "merge_adapter": false, "username": null, "use_flash_attention_2": true, "log": "none", "disable_gradient_checkpointing": false, "model_ref": null, "dpo_beta": 0.1, "prompt_text_column": "prompt"}