imdatta0 committed
Commit 62b5e09
Parent: 8427e8e

End of training
README.md ADDED
---
license: llama2
library_name: peft
tags:
- generated_from_trainer
base_model: meta-llama/Llama-2-7b-hf
model-index:
- name: llama_2_7b_MetaMathQA_40K_downNupNgateNqNkNvNo_r8_lr0.0001_bg88_alpha8_0_41
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# llama_2_7b_MetaMathQA_40K_downNupNgateNqNkNvNo_r8_lr0.0001_bg88_alpha8_0_41

This model is a fine-tuned version of [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf), apparently on a 40K-sample MetaMathQA dataset (per the model name; the auto-generated card did not record the dataset).
It achieves the following results on the evaluation set:
- Loss: 0.5581

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a hedged `TrainingArguments` sketch follows the list):
- learning_rate: 0.0001
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 0.02
- num_epochs: 1
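
The list above maps naturally onto Hugging Face `TrainingArguments`. Below is a minimal sketch, not the author's actual script: the output path is hypothetical, and the fractional `lr_scheduler_warmup_steps: 0.02` is assumed to have been supplied as `warmup_ratio`, since `TrainingArguments` only accepts an integer for `warmup_steps`.

```python
from transformers import TrainingArguments

# Hedged reconstruction of the hyperparameters reported in this card.
args = TrainingArguments(
    output_dir="llama2-7b-metamathqa-lora",  # hypothetical path
    learning_rate=1e-4,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=8,  # 8 * 8 = 64 total train batch size
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.02,  # assumption: the card's fractional "warmup_steps"
    seed=42,
)
```

The Adam settings listed above (betas=(0.9, 0.999), epsilon=1e-08) are the `TrainingArguments` defaults, so they need no explicit arguments.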
### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:------:|:----:|:---------------:|
| 1.0537 | 0.0211 | 13 | 0.9041 |
| 0.8106 | 0.0421 | 26 | 0.7414 |
| 0.6994 | 0.0632 | 39 | 0.6928 |
| 0.6515 | 0.0842 | 52 | 0.6703 |
| 0.6348 | 0.1053 | 65 | 0.6533 |
| 0.612 | 0.1264 | 78 | 0.6429 |
| 0.6204 | 0.1474 | 91 | 0.6329 |
| 0.5964 | 0.1685 | 104 | 0.6248 |
| 0.6179 | 0.1896 | 117 | 0.6181 |
| 0.6008 | 0.2106 | 130 | 0.6133 |
| 0.5846 | 0.2317 | 143 | 0.6087 |
| 0.5685 | 0.2527 | 156 | 0.6043 |
| 0.5704 | 0.2738 | 169 | 0.6001 |
| 0.5657 | 0.2949 | 182 | 0.5961 |
| 0.551 | 0.3159 | 195 | 0.5937 |
| 0.5658 | 0.3370 | 208 | 0.5904 |
| 0.5677 | 0.3580 | 221 | 0.5876 |
| 0.5589 | 0.3791 | 234 | 0.5839 |
| 0.5484 | 0.4002 | 247 | 0.5823 |
| 0.5514 | 0.4212 | 260 | 0.5802 |
| 0.5694 | 0.4423 | 273 | 0.5782 |
| 0.5551 | 0.4633 | 286 | 0.5768 |
| 0.5562 | 0.4844 | 299 | 0.5744 |
| 0.5596 | 0.5055 | 312 | 0.5728 |
| 0.5402 | 0.5265 | 325 | 0.5714 |
| 0.5461 | 0.5476 | 338 | 0.5698 |
| 0.536 | 0.5687 | 351 | 0.5691 |
| 0.5421 | 0.5897 | 364 | 0.5668 |
| 0.5397 | 0.6108 | 377 | 0.5660 |
| 0.5397 | 0.6318 | 390 | 0.5652 |
| 0.547 | 0.6529 | 403 | 0.5641 |
| 0.5465 | 0.6740 | 416 | 0.5634 |
| 0.546 | 0.6950 | 429 | 0.5623 |
| 0.5434 | 0.7161 | 442 | 0.5617 |
| 0.5366 | 0.7371 | 455 | 0.5611 |
| 0.5306 | 0.7582 | 468 | 0.5603 |
| 0.5441 | 0.7793 | 481 | 0.5600 |
| 0.5305 | 0.8003 | 494 | 0.5592 |
| 0.5255 | 0.8214 | 507 | 0.5588 |
| 0.546 | 0.8424 | 520 | 0.5587 |
| 0.545 | 0.8635 | 533 | 0.5586 |
| 0.5377 | 0.8846 | 546 | 0.5584 |
| 0.5361 | 0.9056 | 559 | 0.5582 |
| 0.5315 | 0.9267 | 572 | 0.5581 |
| 0.5394 | 0.9478 | 585 | 0.5580 |
| 0.534 | 0.9688 | 598 | 0.5579 |
| 0.5362 | 0.9899 | 611 | 0.5581 |

### Framework versions

- PEFT 0.7.1
- Transformers 4.40.2
- Pytorch 2.3.0+cu121
- Datasets 2.19.1
- Tokenizers 0.19.1
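
This repository ships only a LoRA adapter (see `adapter_config.json` and `adapter_model.safetensors` below), so it must be loaded on top of the Llama-2-7b base model. A minimal inference sketch with the framework versions listed above; the adapter repo id is inferred from the model name and may need to be adjusted:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "meta-llama/Llama-2-7b-hf"
# Assumed Hub path of this adapter; substitute the actual repo id.
adapter_id = "imdatta0/llama_2_7b_MetaMathQA_40K_downNupNgateNqNkNvNo_r8_lr0.0001_bg88_alpha8_0_41"

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(model, adapter_id)  # attaches the LoRA weights
model.eval()

inputs = tokenizer("What is 15% of 80?", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```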
adapter_config.json ADDED
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": [
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
    20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
    38, 39, 40
  ],
  "loftq_config": {},
  "lora_alpha": 8,
  "lora_dropout": 0,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "rank_pattern": {},
  "revision": "unsloth",
  "target_modules": [
    "gate_proj",
    "o_proj",
    "up_proj",
    "v_proj",
    "k_proj",
    "q_proj",
    "down_proj"
  ],
  "task_type": "CAUSAL_LM"
}
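
For reference, the configuration above corresponds roughly to the following PEFT `LoraConfig`. This is a hedged reconstruction, not the author's training code; note that with `r == lora_alpha == 8` the LoRA scaling factor `lora_alpha / r` is 1.0.

```python
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,
    lora_alpha=8,    # alpha/r = 1.0 scaling
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
        "gate_proj", "up_proj", "down_proj",     # MLP projections
    ],
    layers_to_transform=list(range(41)),  # 0..40, as listed in the JSON above
)
```

Passing this to `peft.get_peft_model(base_model, lora_config)` would produce an adapter of the same shape.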
adapter_model.safetensors ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:82742ffaf44f0f03eff7b6ae09476d9b9c2be475b3400bfa55953a774bfaec5a
size 80013120
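
This entry (like `tokenizer.model` and `training_args.bin` below) is a Git LFS pointer: the repository itself stores only the blob's SHA-256 and size, while the roughly 80 MB of adapter weights live in LFS storage. Fetching the resolved file via `huggingface_hub` is straightforward; the repo id below is the same assumed path as in the usage sketch above:

```python
from huggingface_hub import hf_hub_download

# Assumed Hub path of this adapter; substitute the actual repo id.
adapter_id = "imdatta0/llama_2_7b_MetaMathQA_40K_downNupNgateNqNkNvNo_r8_lr0.0001_bg88_alpha8_0_41"

# Resolves the LFS pointer and returns the local cache path of the real file.
path = hf_hub_download(repo_id=adapter_id, filename="adapter_model.safetensors")
print(path)
```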
special_tokens_map.json ADDED
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<unk>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
tokenizer_config.json ADDED
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": false,
  "model_max_length": 4096,
  "pad_token": "<unk>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
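
Two details of this configuration matter in practice: Llama-2 ships no dedicated padding token, so `<unk>` is reused as `pad_token`, and padding is applied on the right. A minimal sketch of loading the tokenizer and confirming that setup (same assumed repo id as above):

```python
from transformers import AutoTokenizer

# Assumed Hub path of this adapter; substitute the actual repo id.
adapter_id = "imdatta0/llama_2_7b_MetaMathQA_40K_downNupNgateNqNkNvNo_r8_lr0.0001_bg88_alpha8_0_41"
tokenizer = AutoTokenizer.from_pretrained(adapter_id)

print(tokenizer.pad_token)         # "<unk>" (reused as pad token)
print(tokenizer.padding_side)      # "right"
print(tokenizer.model_max_length)  # 4096

batch = tokenizer(["hi", "a somewhat longer input"], padding=True, return_tensors="pt")
print(batch["attention_mask"])     # zeros mark the right-side padding
```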
training_args.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:63779f6d929163ffbafd72c10103ab1ffb3472b9f9bf0cb13a9a335c123825e6
size 5240