sujithatz committed on
Commit 4ef2579 · verified · 1 Parent(s): 85a2f87

sujithatz/finbot-transofrmer-based-phi3.5_adapter

README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-base_model: microsoft/Phi-3-mini-4k-instruct
+base_model: microsoft/Phi-3.5-mini-instruct
 library_name: peft
 license: mit
 tags:
@@ -16,9 +16,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # phi-3-mini-LoRA
 
-This model is a fine-tuned version of [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) on an unknown dataset.
+This model is a fine-tuned version of [microsoft/Phi-3.5-mini-instruct](https://huggingface.co/microsoft/Phi-3.5-mini-instruct) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.2990
+- Loss: 0.4630
 
 ## Model description
 
@@ -37,28 +37,77 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 0.0002
-- train_batch_size: 2
-- eval_batch_size: 4
+- learning_rate: 1e-05
+- train_batch_size: 8
+- eval_batch_size: 8
 - seed: 42
 - gradient_accumulation_steps: 4
-- total_train_batch_size: 8
+- total_train_batch_size: 32
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- lr_scheduler_warmup_ratio: 0.1
-- num_epochs: 5
+- lr_scheduler_warmup_ratio: 0.2
+- training_steps: 250
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss |
-|:-------------:|:------:|:----:|:---------------:|
-| 0.619 | 4.6667 | 70 | 0.2990 |
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-------:|:----:|:---------------:|
+| 1.6578 | 1.3333 | 5 | 1.6863 |
+| 1.6277 | 2.6667 | 10 | 1.6771 |
+| 1.615 | 4.0 | 15 | 1.6615 |
+| 1.5879 | 5.3333 | 20 | 1.6372 |
+| 1.5835 | 6.6667 | 25 | 1.6028 |
+| 1.5908 | 8.0 | 30 | 1.5586 |
+| 1.5143 | 9.3333 | 35 | 1.5012 |
+| 1.4633 | 10.6667 | 40 | 1.4352 |
+| 1.3414 | 12.0 | 45 | 1.3606 |
+| 1.3229 | 13.3333 | 50 | 1.2811 |
+| 1.2218 | 14.6667 | 55 | 1.2119 |
+| 1.1352 | 16.0 | 60 | 1.1488 |
+| 1.0852 | 17.3333 | 65 | 1.0885 |
+| 0.9989 | 18.6667 | 70 | 1.0299 |
+| 0.9959 | 20.0 | 75 | 0.9757 |
+| 0.921 | 21.3333 | 80 | 0.9205 |
+| 0.8727 | 22.6667 | 85 | 0.8683 |
+| 0.8067 | 24.0 | 90 | 0.8200 |
+| 0.7785 | 25.3333 | 95 | 0.7783 |
+| 0.7139 | 26.6667 | 100 | 0.7396 |
+| 0.7081 | 28.0 | 105 | 0.7095 |
+| 0.6705 | 29.3333 | 110 | 0.6824 |
+| 0.6177 | 30.6667 | 115 | 0.6613 |
+| 0.6106 | 32.0 | 120 | 0.6418 |
+| 0.575 | 33.3333 | 125 | 0.6239 |
+| 0.5904 | 34.6667 | 130 | 0.6083 |
+| 0.5917 | 36.0 | 135 | 0.5927 |
+| 0.5051 | 37.3333 | 140 | 0.5801 |
+| 0.5169 | 38.6667 | 145 | 0.5656 |
+| 0.5442 | 40.0 | 150 | 0.5542 |
+| 0.5112 | 41.3333 | 155 | 0.5432 |
+| 0.5061 | 42.6667 | 160 | 0.5321 |
+| 0.5071 | 44.0 | 165 | 0.5234 |
+| 0.4373 | 45.3333 | 170 | 0.5119 |
+| 0.4476 | 46.6667 | 175 | 0.5049 |
+| 0.3914 | 48.0 | 180 | 0.4972 |
+| 0.465 | 49.3333 | 185 | 0.4914 |
+| 0.4122 | 50.6667 | 190 | 0.4890 |
+| 0.4209 | 52.0 | 195 | 0.4837 |
+| 0.3933 | 53.3333 | 200 | 0.4784 |
+| 0.3583 | 54.6667 | 205 | 0.4760 |
+| 0.3952 | 56.0 | 210 | 0.4727 |
+| 0.3858 | 57.3333 | 215 | 0.4708 |
+| 0.3433 | 58.6667 | 220 | 0.4707 |
+| 0.4041 | 60.0 | 225 | 0.4680 |
+| 0.3558 | 61.3333 | 230 | 0.4665 |
+| 0.382 | 62.6667 | 235 | 0.4650 |
+| 0.3625 | 64.0 | 240 | 0.4638 |
+| 0.3513 | 65.3333 | 245 | 0.4644 |
+| 0.3541 | 66.6667 | 250 | 0.4630 |
 
 
 ### Framework versions
 
-- PEFT 0.13.0
-- Transformers 4.45.1
-- Pytorch 2.4.1+cu121
+- PEFT 0.13.2
+- Transformers 4.45.2
+- Pytorch 2.4.0
 - Datasets 3.0.1
 - Tokenizers 0.20.0
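
The card above documents the adapter retrained against its new base. Below is a minimal loading sketch, assuming the adapter id `sujithatz/finbot-transofrmer-based-phi3.5_adapter` from this commit; the dtype and device placement are illustrative choices, not taken from the card:

```python
# Minimal sketch: load the updated adapter on its new base model.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "microsoft/Phi-3.5-mini-instruct"
adapter_id = "sujithatz/finbot-transofrmer-based-phi3.5_adapter"  # repo from this commit

tokenizer = AutoTokenizer.from_pretrained(adapter_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id,
    torch_dtype=torch.bfloat16,  # illustrative; pick a dtype your hardware supports
    device_map="auto",
)
model = PeftModel.from_pretrained(base, adapter_id)
model.eval()
```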
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "microsoft/Phi-3-mini-4k-instruct",
+  "base_model_name_or_path": "microsoft/Phi-3.5-mini-instruct",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -10,7 +10,7 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 16,
+  "lora_alpha": 32,
   "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
@@ -20,13 +20,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
-    "up_proj",
-    "o_proj",
-    "k_proj",
-    "gate_proj",
     "down_proj",
-    "q_proj"
+    "o_proj",
+    "gate_up_proj",
+    "qkv_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9683bef0e3f11bbdb5fef4040748cefcdfce22840d306c6861c340871b8d49d2
-size 35668592
+oid sha256:34328d550a8b27f1ce0567efebc0753762b2e56158e73fcf2fbfe056028991de
+size 100697728
tokenizer.json CHANGED
@@ -2,7 +2,7 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length": 512,
+    "max_length": 2048,
     "strategy": "LongestFirst",
     "stride": 0
   },
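
The raised truncation cap can also be reproduced explicitly at call time; a small sketch, assuming the tokenizer shipped in this repo:

```python
# Sketch: explicit truncation matching the new 2048-token cap in tokenizer.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("sujithatz/finbot-transofrmer-based-phi3.5_adapter")
enc = tok("a long financial document " * 1000, truncation=True, max_length=2048)
print(len(enc["input_ids"]))  # at most 2048
```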
tokenizer_config.json CHANGED
@@ -117,11 +117,11 @@
     }
   },
   "bos_token": "<s>",
-  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
+  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
   "legacy": false,
-  "model_max_length": 4096,
+  "model_max_length": 131072,
   "pad_token": "<unk>",
   "padding_side": "left",
   "sp_model_kwargs": {},
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ce710ba59e50ddc28a07ae112ed2d16849ac7630a3c9206dbc26cb350177409
+oid sha256:3083cc2b7d42aa4e26b73ee560dd2185afd1e4c08af814236bcfcf5a7c7f3fb3
 size 5432