shyam-incedoinc committed
Commit 27e9eeb • 1 Parent(s): 7d2bd17

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - autotrain
+ - text-generation
+ widget:
+ - text: "I love AutoTrain because "
+ ---
+
+ # Model Trained Using AutoTrain
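
The README metadata above registers the model with AutoTrain's text-generation widget. A minimal local sketch of the same widget prompt, assuming the merged fine-tuned weights were pushed to the `repo_id` declared in `training_params.json` below (the adapter-only files here would first need to be merged onto the base model):

```python
from transformers import pipeline

# Hypothetical smoke test: run the README's widget prompt against the
# pushed repo (assumes merged weights, not just the LoRA adapter).
generator = pipeline(
    "text-generation",
    model="shyam-incedoinc/codellama-7b-instruct-hf-finetuned-qa",
)
print(generator("I love AutoTrain because ", max_new_tokens=32)[0]["generated_text"])
```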
adapter_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "codellama/CodeLlama-7b-Instruct-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
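
This adapter config describes a rank-16 LoRA (alpha 32, dropout 0.05) applied to the `q_proj` and `v_proj` attention projections of CodeLlama-7b-Instruct. A sketch of attaching it with PEFT, assuming the repo has been downloaded to a local `adapter_dir`:

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Load the base model named in adapter_config.json, then attach the adapter.
base = AutoModelForCausalLM.from_pretrained("codellama/CodeLlama-7b-Instruct-hf")
model = PeftModel.from_pretrained(base, "adapter_dir")  # adapter_dir is a placeholder path

# Optionally fold the low-rank update back into the base weights
# (training_params.json below sets "merge_adapter": true).
model = model.merge_and_unload()
```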
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61f61cdb3612975c552c336d51b12241684adad41f292a41337dc16f9b432d4b
+ size 33600461
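
`adapter_model.bin` is stored as a Git LFS pointer: the spec `version`, the SHA-256 `oid` of the real blob, and its `size` in bytes (~33.6 MB here). A sketch of verifying a fetched copy against the pointer, assuming `git lfs pull` has already replaced the pointer with the actual file:

```python
import hashlib

# Hash the downloaded blob and compare it with the pointer's oid.
with open("adapter_model.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "61f61cdb3612975c552c336d51b12241684adad41f292a41337dc16f9b432d4b"
```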
checkpoint-90/README.md ADDED
@@ -0,0 +1,34 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.6.0.dev0
+
+ - PEFT 0.6.0.dev0
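
The bullet list in this checkpoint README describes a 4-bit NF4 load with fp16 compute. A sketch of the equivalent `transformers` setup, with field names mirroring the bullets:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirror the quantization bullets from the checkpoint README.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    "codellama/CodeLlama-7b-Instruct-hf",
    quantization_config=bnb_config,
)
```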
checkpoint-90/adapter_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "codellama/CodeLlama-7b-Instruct-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-90/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61f61cdb3612975c552c336d51b12241684adad41f292a41337dc16f9b432d4b
+ size 33600461
checkpoint-90/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb96f88885dffed4ff81b248a0a36f20f18d1b57624b3efc5ff0e5d215550507
+ size 67216581
checkpoint-90/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4dcb546a6f00cf7b1d2e7543e2efb12f502c2d5d7264810481ff219cd36e123f
+ size 14575
checkpoint-90/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c19939c0d126a31cccdba0e576c8f635f41c74e71b770144f1e2717faf2bc703
+ size 627
checkpoint-90/special_tokens_map.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "additional_special_tokens": [
+ "▁<PRE>",
+ "▁<MID>",
+ "▁<SUF>",
+ "▁<EOT>"
+ ],
+ "bos_token": "<s>",
+ "eos_token": "</s>",
+ "pad_token": "</s>",
+ "unk_token": "<unk>"
+ }
checkpoint-90/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-90/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45ccb9c8b6b561889acea59191d66986d314e7cbd6a78abc6e49b139ca91c1e6
+ size 500058
checkpoint-90/tokenizer_config.json ADDED
@@ -0,0 +1,81 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32007": {
+ "content": "▁<PRE>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32008": {
+ "content": "▁<SUF>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32009": {
+ "content": "▁<MID>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32010": {
+ "content": "▁<EOT>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "▁<PRE>",
+ "▁<MID>",
+ "▁<SUF>",
+ "▁<EOT>"
+ ],
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "eot_token": "▁<EOT>",
+ "fill_token": "<FILL_ME>",
+ "legacy": null,
+ "middle_token": "▁<MID>",
+ "model_max_length": 1024,
+ "pad_token": null,
+ "prefix_token": "▁<PRE>",
+ "sp_model_kwargs": {},
+ "suffix_token": "▁<SUF>",
+ "tokenizer_class": "CodeLlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
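
The config above wires up CodeLlama's infilling tokens (`▁<PRE>`, `▁<SUF>`, `▁<MID>`, `▁<EOT>`) and the `<FILL_ME>` sentinel. A sketch of the fill-in-the-middle flow, which `CodeLlamaTokenizer` handles automatically when a prompt contains the `fill_token`:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-Instruct-hf")
# The tokenizer splits on <FILL_ME> and emits the ▁<PRE>/▁<SUF>/▁<MID> layout,
# so the model can generate the missing middle span.
ids = tok("def add(a, b):\n    <FILL_ME>\n", return_tensors="pt").input_ids
```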
checkpoint-90/trainer_state.json ADDED
@@ -0,0 +1,329 @@
+ {
+ "best_metric": 0.6403456926345825,
+ "best_model_checkpoint": "qa-model-finetune/checkpoint-90",
+ "epoch": 4.151260504201681,
+ "eval_steps": 500,
+ "global_step": 90,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.666666666666667e-06,
+ "loss": 1.4273,
+ "step": 2
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.3333333333333333e-05,
+ "loss": 1.3778,
+ "step": 4
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 2e-05,
+ "loss": 1.3453,
+ "step": 6
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 2.6666666666666667e-05,
+ "loss": 1.3079,
+ "step": 8
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 3.3333333333333335e-05,
+ "loss": 1.32,
+ "step": 10
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 4e-05,
+ "loss": 1.2608,
+ "step": 12
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.666666666666667e-05,
+ "loss": 1.1844,
+ "step": 14
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 5.333333333333333e-05,
+ "loss": 1.2725,
+ "step": 16
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 6e-05,
+ "loss": 1.2058,
+ "step": 18
+ },
+ {
+ "epoch": 0.15,
+ "eval_loss": 1.1214113235473633,
+ "eval_runtime": 30.7852,
+ "eval_samples_per_second": 1.624,
+ "eval_steps_per_second": 0.422,
+ "step": 18
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 6.666666666666667e-05,
+ "loss": 1.1389,
+ "step": 20
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 7.333333333333333e-05,
+ "loss": 1.1408,
+ "step": 22
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 8e-05,
+ "loss": 1.0969,
+ "step": 24
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 8.666666666666667e-05,
+ "loss": 1.0436,
+ "step": 26
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.333333333333334e-05,
+ "loss": 1.0753,
+ "step": 28
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 0.0001,
+ "loss": 1.0507,
+ "step": 30
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 0.00010666666666666667,
+ "loss": 1.021,
+ "step": 32
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 0.00011333333333333334,
+ "loss": 1.0122,
+ "step": 34
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 0.00012,
+ "loss": 0.9816,
+ "step": 36
+ },
+ {
+ "epoch": 1.15,
+ "eval_loss": 0.9444507360458374,
+ "eval_runtime": 30.7205,
+ "eval_samples_per_second": 1.628,
+ "eval_steps_per_second": 0.423,
+ "step": 36
+ },
+ {
+ "epoch": 2.02,
+ "learning_rate": 0.00012666666666666666,
+ "loss": 0.9175,
+ "step": 38
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 0.00013333333333333334,
+ "loss": 0.9869,
+ "step": 40
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 0.00014,
+ "loss": 0.8789,
+ "step": 42
+ },
+ {
+ "epoch": 2.07,
+ "learning_rate": 0.00014666666666666666,
+ "loss": 0.8595,
+ "step": 44
+ },
+ {
+ "epoch": 2.08,
+ "learning_rate": 0.00015333333333333334,
+ "loss": 0.8519,
+ "step": 46
+ },
+ {
+ "epoch": 2.1,
+ "learning_rate": 0.00016,
+ "loss": 0.8953,
+ "step": 48
+ },
+ {
+ "epoch": 2.12,
+ "learning_rate": 0.0001666666666666667,
+ "loss": 0.838,
+ "step": 50
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 0.00017333333333333334,
+ "loss": 0.8086,
+ "step": 52
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 0.00018,
+ "loss": 0.7672,
+ "step": 54
+ },
+ {
+ "epoch": 2.15,
+ "eval_loss": 0.7806979417800903,
+ "eval_runtime": 30.7228,
+ "eval_samples_per_second": 1.627,
+ "eval_steps_per_second": 0.423,
+ "step": 54
+ },
+ {
+ "epoch": 3.02,
+ "learning_rate": 0.0001866666666666667,
+ "loss": 0.7616,
+ "step": 56
+ },
+ {
+ "epoch": 3.03,
+ "learning_rate": 0.00019333333333333333,
+ "loss": 0.7566,
+ "step": 58
+ },
+ {
+ "epoch": 3.05,
+ "learning_rate": 0.0002,
+ "loss": 0.7002,
+ "step": 60
+ },
+ {
+ "epoch": 3.07,
+ "learning_rate": 0.00019925233644859814,
+ "loss": 0.7163,
+ "step": 62
+ },
+ {
+ "epoch": 3.08,
+ "learning_rate": 0.00019850467289719628,
+ "loss": 0.6967,
+ "step": 64
+ },
+ {
+ "epoch": 3.1,
+ "learning_rate": 0.00019775700934579439,
+ "loss": 0.7003,
+ "step": 66
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 0.00019700934579439255,
+ "loss": 0.6175,
+ "step": 68
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 0.00019626168224299065,
+ "loss": 0.6722,
+ "step": 70
+ },
+ {
+ "epoch": 3.15,
+ "learning_rate": 0.0001955140186915888,
+ "loss": 0.6632,
+ "step": 72
+ },
+ {
+ "epoch": 3.15,
+ "eval_loss": 0.692381739616394,
+ "eval_runtime": 30.7879,
+ "eval_samples_per_second": 1.624,
+ "eval_steps_per_second": 0.422,
+ "step": 72
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 0.00019476635514018692,
+ "loss": 0.6006,
+ "step": 74
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 0.00019401869158878506,
+ "loss": 0.6184,
+ "step": 76
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 0.0001932710280373832,
+ "loss": 0.5665,
+ "step": 78
+ },
+ {
+ "epoch": 4.07,
+ "learning_rate": 0.00019252336448598133,
+ "loss": 0.558,
+ "step": 80
+ },
+ {
+ "epoch": 4.08,
+ "learning_rate": 0.00019177570093457943,
+ "loss": 0.5758,
+ "step": 82
+ },
+ {
+ "epoch": 4.1,
+ "learning_rate": 0.0001910280373831776,
+ "loss": 0.6052,
+ "step": 84
+ },
+ {
+ "epoch": 4.12,
+ "learning_rate": 0.0001902803738317757,
+ "loss": 0.538,
+ "step": 86
+ },
+ {
+ "epoch": 4.13,
+ "learning_rate": 0.00018953271028037384,
+ "loss": 0.5648,
+ "step": 88
+ },
+ {
+ "epoch": 4.15,
+ "learning_rate": 0.00018878504672897197,
+ "loss": 0.5013,
+ "step": 90
+ },
+ {
+ "epoch": 4.15,
+ "eval_loss": 0.6403456926345825,
+ "eval_runtime": 30.7239,
+ "eval_samples_per_second": 1.627,
+ "eval_steps_per_second": 0.423,
+ "step": 90
+ }
+ ],
+ "logging_steps": 2,
+ "max_steps": 595,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "total_flos": 1.4226611503104e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
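
The logged learning rates follow the linear scheduler with `warmup_ratio: 0.1` from `training_params.json`: warmup runs for roughly 0.1 × 595 ≈ 60 steps up to the peak `lr = 2e-4`, then decays linearly toward zero at `max_steps = 595`. A sketch that reproduces the logged values:

```python
# Linear warmup + linear decay, as configured in training_params.json.
def lr_at(step, peak=2e-4, warmup=60, max_steps=595):
    if step < warmup:
        return peak * step / warmup
    return peak * (max_steps - step) / (max_steps - warmup)

print(lr_at(30))  # 1e-04, matches the step-30 log entry
print(lr_at(62))  # ~1.9925e-04, matches the step-62 log entry
```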
checkpoint-90/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:864e3214e768bbcfe866351dcefc2f3e56e5842ba3615c0cc1045f59dbca0e5f
+ size 4091
runs/Sep25_09-52-08_52c26e5c4baa/events.out.tfevents.1695635528.52c26e5c4baa.3801.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7776733ffb141a9e69bca5ab5b279761a9b63613fe990b578c3e8c450d25244
+ size 13133
special_tokens_map.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "additional_special_tokens": [
+ "▁<PRE>",
+ "▁<MID>",
+ "▁<SUF>",
+ "▁<EOT>"
+ ],
+ "bos_token": "<s>",
+ "eos_token": "</s>",
+ "pad_token": "</s>",
+ "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45ccb9c8b6b561889acea59191d66986d314e7cbd6a78abc6e49b139ca91c1e6
+ size 500058
tokenizer_config.json ADDED
@@ -0,0 +1,81 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32007": {
+ "content": "▁<PRE>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32008": {
+ "content": "▁<SUF>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32009": {
+ "content": "▁<MID>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32010": {
+ "content": "▁<EOT>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "▁<PRE>",
+ "▁<MID>",
+ "▁<SUF>",
+ "▁<EOT>"
+ ],
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "eot_token": "▁<EOT>",
+ "fill_token": "<FILL_ME>",
+ "legacy": null,
+ "middle_token": "▁<MID>",
+ "model_max_length": 1024,
+ "pad_token": null,
+ "prefix_token": "▁<PRE>",
+ "sp_model_kwargs": {},
+ "suffix_token": "▁<SUF>",
+ "tokenizer_class": "CodeLlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:864e3214e768bbcfe866351dcefc2f3e56e5842ba3615c0cc1045f59dbca0e5f
+ size 4091
training_params.json ADDED
@@ -0,0 +1,40 @@
+ {
+ "model": "codellama/CodeLlama-7b-Instruct-hf",
+ "data_path": "/content/drive/MyDrive/AMK_QA/finetuning/data/input",
+ "project_name": "qa-model-finetune",
+ "train_split": "train",
+ "valid_split": "valid",
+ "text_column": "text",
+ "token": null,
+ "lr": 0.0002,
+ "epochs": 5,
+ "batch_size": 4,
+ "warmup_ratio": 0.1,
+ "gradient_accumulation": 1,
+ "optimizer": "adamw_torch",
+ "scheduler": "linear",
+ "weight_decay": 0.0,
+ "max_grad_norm": 1.0,
+ "seed": 42,
+ "add_eos_token": false,
+ "block_size": -1,
+ "use_peft": true,
+ "lora_r": 16,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "logging_steps": -1,
+ "evaluation_strategy": "epoch",
+ "save_total_limit": 1,
+ "save_strategy": "epoch",
+ "auto_find_batch_size": false,
+ "fp16": false,
+ "push_to_hub": true,
+ "use_int8": false,
+ "model_max_length": 1024,
+ "repo_id": "shyam-incedoinc/codellama-7b-instruct-hf-finetuned-qa",
+ "use_int4": true,
+ "trainer": "sft",
+ "target_modules": null,
+ "merge_adapter": true,
+ "username": null
+ }
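
For reference, a sketch of how these AutoTrain parameters would translate into a PEFT `LoraConfig`; since `target_modules` is null here, the `q_proj`/`v_proj` pair recorded in `adapter_config.json` is presumably the tool's default for this architecture:

```python
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,               # lora_r
    lora_alpha=32,      # lora_alpha
    lora_dropout=0.05,  # lora_dropout
    target_modules=["q_proj", "v_proj"],  # default applied when null above
    task_type="CAUSAL_LM",
    bias="none",
)
```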