Commit 49557a6 (verified) by kyryl-georgian · 1 Parent(s): 838cf87

End of training
README.md CHANGED
@@ -1,9 +1,8 @@
 ---
 license: apache-2.0
-library_name: peft
+base_model: google/flan-t5-small
 tags:
 - generated_from_trainer
-base_model: google/flan-t5-small
 model-index:
 - name: flan-small-sql
   results: []
@@ -16,7 +15,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [google/flan-t5-small](https://huggingface.co/google/flan-t5-small) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.1380
+- Loss: 2.0431
 
 ## Model description
 
@@ -41,30 +40,15 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 10.0
+- num_epochs: 0.001
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss |
-|:-------------:|:-----:|:----:|:---------------:|
-| 0.4956        | 0.8   | 500  | 0.2179          |
-| 0.3076        | 1.6   | 1000 | 0.1833          |
-| 0.2707        | 2.4   | 1500 | 0.1702          |
-| 0.2439        | 3.2   | 2000 | 0.1608          |
-| 0.2295        | 4.0   | 2500 | 0.1565          |
-| 0.2163        | 4.8   | 3000 | 0.1502          |
-| 0.2058        | 5.6   | 3500 | 0.1501          |
-| 0.2009        | 6.4   | 4000 | 0.1458          |
-| 0.1898        | 7.2   | 4500 | 0.1427          |
-| 0.1874        | 8.0   | 5000 | 0.1428          |
-| 0.1792        | 8.8   | 5500 | 0.1390          |
-| 0.1757        | 9.6   | 6000 | 0.1386          |
 
 
 ### Framework versions
 
-- PEFT 0.7.1
-- Transformers 4.38.0
-- Pytorch 2.1.2+cu121
-- Datasets 2.17.0
-- Tokenizers 0.15.2
+- Transformers 4.36.0.dev0
+- Pytorch 2.1.0+cu118
+- Datasets 2.14.6
+- Tokenizers 0.14.1
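For reference, a minimal sketch of how the adapter published in this repo could be loaded on top of the base model after this commit. The repo id `kyryl-georgian/flan-small-sql` and the SQL-style prompt are assumptions for illustration, not taken from the files above.

```python
# Hedged sketch: load the LoRA adapter onto google/flan-t5-small and run one query.
# The repo id "kyryl-georgian/flan-small-sql" is assumed, not stated in the diff.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
model = PeftModel.from_pretrained(base, "kyryl-georgian/flan-small-sql")  # assumed repo id

inputs = tokenizer("Translate to SQL: list all users older than 30", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```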
 
adapter_config.json CHANGED
@@ -19,8 +19,9 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q",
-    "v"
+    "v",
+    "q"
   ],
-  "task_type": "SEQ_2_SEQ_LM"
+  "task_type": "SEQ_2_SEQ_LM",
+  "use_rslora": false
 }
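The `target_modules`, `task_type`, and `use_rslora` fields above map directly onto a PEFT `LoraConfig`. Below is a hedged sketch of a config that would serialize to roughly this file; the rank, alpha, and dropout values are illustrative placeholders, since they are not shown in this hunk.

```python
# Hedged sketch of a LoraConfig matching the adapter_config.json fields shown above.
from transformers import AutoModelForSeq2SeqLM
from peft import LoraConfig, get_peft_model

config = LoraConfig(
    task_type="SEQ_2_SEQ_LM",    # from adapter_config.json
    target_modules=["q", "v"],   # T5 attention query/value projections, as listed
    use_rslora=False,            # field written by newer PEFT releases
    r=8,                         # illustrative placeholder
    lora_alpha=16,               # illustrative placeholder
    lora_dropout=0.1,            # illustrative placeholder
)
base = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")
model = get_peft_model(base, config)
model.print_trainable_parameters()  # only the q/v LoRA matrices are trainable
```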
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a300d41e02bb949781fc8216dc693c26afaa57a55483ed09881bfa83438f6dc
+oid sha256:73d36a8bd1459a02f673c0ce7dde2ca37d3c20c69e509002c959ecb70c629af4
 size 2765880
all_results.json CHANGED
@@ -1,11 +1,11 @@
 {
-    "epoch": 10.0,
-    "eval_loss": 0.1379527598619461,
-    "eval_runtime": 13.5172,
-    "eval_samples_per_second": 581.261,
-    "eval_steps_per_second": 36.398,
-    "train_loss": 0.23933088928222657,
-    "train_runtime": 648.1335,
-    "train_samples_per_second": 154.289,
-    "train_steps_per_second": 9.643
+    "epoch": 0.0,
+    "eval_loss": 2.043109178543091,
+    "eval_runtime": 12.8998,
+    "eval_samples_per_second": 609.078,
+    "eval_steps_per_second": 38.14,
+    "train_loss": 2.5783498287200928,
+    "train_runtime": 0.3238,
+    "train_samples_per_second": 30.88,
+    "train_steps_per_second": 3.088
 }
emissions.csv ADDED
@@ -0,0 +1,3 @@
+timestamp,experiment_id,project_name,duration,emissions,energy_consumed,country_name,country_iso_code,region,on_cloud,cloud_provider,cloud_region
+2024-02-24T00:05:40,16ea20c8-bcb1-453e-bce2-f6cb0599f084,codecarbon,0.3465697765350342,2.382074432305905e-05,6.453136242463374e-05,United States,USA,virginia,N,,
+2024-02-24T00:08:29,9fde16a4-8a69-4bb9-a34b-f48f907546a5,codecarbon,0.3421628475189209,2.3325881593192804e-05,6.319075922020199e-05,United States,USA,virginia,N,,
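The emissions.csv columns follow codecarbon's standard output schema. Below is a hedged sketch of how such rows are typically produced around a training run; the project name and output directory are assumptions.

```python
# Hedged sketch: codecarbon's EmissionsTracker appends rows like the ones above.
from codecarbon import EmissionsTracker

tracker = EmissionsTracker(project_name="codecarbon", output_dir=".")  # assumed settings
tracker.start()
try:
    # Placeholder workload standing in for trainer.train()
    _ = sum(i * i for i in range(1_000_000))
finally:
    emissions_kg = tracker.stop()  # writes duration, emissions, energy_consumed, region, ...
    print(emissions_kg)
```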
eval_results.json CHANGED
@@ -1,7 +1,7 @@
 {
-    "epoch": 10.0,
-    "eval_loss": 0.1379527598619461,
-    "eval_runtime": 13.5172,
-    "eval_samples_per_second": 581.261,
-    "eval_steps_per_second": 36.398
+    "epoch": 0.0,
+    "eval_loss": 2.043109178543091,
+    "eval_runtime": 12.8998,
+    "eval_samples_per_second": 609.078,
+    "eval_steps_per_second": 38.14
 }
runs/Feb24_00-04-25_bd236bb80193/events.out.tfevents.1708733095.bd236bb80193.37.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bd2e3ee3c5b4adde9589d9edd0fe5032b6ac6246666af865022bb732304326
+size 5342
runs/Feb24_00-05-16_bd236bb80193/events.out.tfevents.1708733139.bd236bb80193.413.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:056ffc6ba8d477c8d7a49b8aed88ea3551315eb633fbd72eae1b4520cb6024c1
+size 5691
runs/Feb24_00-05-16_bd236bb80193/events.out.tfevents.1708733153.bd236bb80193.413.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a923660a4f2cfd27f425ca843977587f67ee4cfd04b0c3883553ba83d9fb6df
+size 354
runs/Feb24_00-08-06_bd236bb80193/events.out.tfevents.1708733309.bd236bb80193.988.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:933752e60e1039ea736004287e5574a9b6b48e73fbfb2b69c188daa264b5bcd3
+size 5691
runs/Feb24_00-08-06_bd236bb80193/events.out.tfevents.1708733322.bd236bb80193.988.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb40150c9eef6df850249faccc4638b6810165c4ff7c89209825de66112280fa
+size 354
special_tokens_map.json CHANGED
@@ -101,25 +101,7 @@
     "<extra_id_98>",
     "<extra_id_99>"
   ],
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "eos_token": "</s>",
+  "pad_token": "<pad>",
+  "unk_token": "<unk>"
 }
spiece.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+size 791656
tokenizer.json CHANGED
@@ -964,8 +964,7 @@
   "pre_tokenizer": {
     "type": "Metaspace",
     "replacement": "▁",
-    "add_prefix_space": true,
-    "prepend_scheme": "always"
+    "add_prefix_space": true
   },
   "post_processor": {
     "type": "TemplateProcessing",
@@ -1024,8 +1023,7 @@
   "decoder": {
     "type": "Metaspace",
     "replacement": "▁",
-    "add_prefix_space": true,
-    "prepend_scheme": "always"
+    "add_prefix_space": true
   },
   "model": {
     "type": "Unigram",
train_results.json CHANGED
@@ -1,7 +1,7 @@
 {
-    "epoch": 10.0,
-    "train_loss": 0.23933088928222657,
-    "train_runtime": 648.1335,
-    "train_samples_per_second": 154.289,
-    "train_steps_per_second": 9.643
+    "epoch": 0.0,
+    "train_loss": 2.5783498287200928,
+    "train_runtime": 0.3238,
+    "train_samples_per_second": 30.88,
+    "train_steps_per_second": 3.088
 }
trainer_state.json CHANGED
@@ -1,210 +1,28 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 10.0,
+  "epoch": 0.0016,
   "eval_steps": 500,
-  "global_step": 6250,
+  "global_step": 1,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.8,
-      "grad_norm": 0.5397563576698303,
-      "learning_rate": 0.00092,
-      "loss": 0.4956,
-      "step": 500
-    },
-    {
-      "epoch": 0.8,
-      "eval_loss": 0.2178906500339508,
-      "eval_runtime": 13.319,
-      "eval_samples_per_second": 589.908,
-      "eval_steps_per_second": 36.94,
-      "step": 500
-    },
-    {
-      "epoch": 1.6,
-      "grad_norm": 0.549730122089386,
-      "learning_rate": 0.00084,
-      "loss": 0.3076,
-      "step": 1000
-    },
-    {
-      "epoch": 1.6,
-      "eval_loss": 0.18326763808727264,
-      "eval_runtime": 13.3435,
-      "eval_samples_per_second": 588.825,
-      "eval_steps_per_second": 36.872,
-      "step": 1000
-    },
-    {
-      "epoch": 2.4,
-      "grad_norm": 0.5204197764396667,
-      "learning_rate": 0.00076,
-      "loss": 0.2707,
-      "step": 1500
-    },
-    {
-      "epoch": 2.4,
-      "eval_loss": 0.17023101449012756,
-      "eval_runtime": 13.2516,
-      "eval_samples_per_second": 592.91,
-      "eval_steps_per_second": 37.128,
-      "step": 1500
-    },
-    {
-      "epoch": 3.2,
-      "grad_norm": 0.3998057246208191,
-      "learning_rate": 0.00068,
-      "loss": 0.2439,
-      "step": 2000
-    },
-    {
-      "epoch": 3.2,
-      "eval_loss": 0.160837784409523,
-      "eval_runtime": 13.2287,
-      "eval_samples_per_second": 593.936,
-      "eval_steps_per_second": 37.192,
-      "step": 2000
-    },
-    {
-      "epoch": 4.0,
-      "grad_norm": 0.41501811146736145,
-      "learning_rate": 0.0006,
-      "loss": 0.2295,
-      "step": 2500
-    },
-    {
-      "epoch": 4.0,
-      "eval_loss": 0.15653032064437866,
-      "eval_runtime": 13.2124,
-      "eval_samples_per_second": 594.669,
-      "eval_steps_per_second": 37.238,
-      "step": 2500
-    },
-    {
-      "epoch": 4.8,
-      "grad_norm": 0.4228770136833191,
-      "learning_rate": 0.0005200000000000001,
-      "loss": 0.2163,
-      "step": 3000
-    },
-    {
-      "epoch": 4.8,
-      "eval_loss": 0.15016524493694305,
-      "eval_runtime": 13.3446,
-      "eval_samples_per_second": 588.779,
-      "eval_steps_per_second": 36.869,
-      "step": 3000
-    },
-    {
-      "epoch": 5.6,
-      "grad_norm": 0.48046761751174927,
-      "learning_rate": 0.00044,
-      "loss": 0.2058,
-      "step": 3500
-    },
-    {
-      "epoch": 5.6,
-      "eval_loss": 0.15010309219360352,
-      "eval_runtime": 13.3435,
-      "eval_samples_per_second": 588.824,
-      "eval_steps_per_second": 36.872,
-      "step": 3500
-    },
-    {
-      "epoch": 6.4,
-      "grad_norm": 0.3269839882850647,
-      "learning_rate": 0.00035999999999999997,
-      "loss": 0.2009,
-      "step": 4000
-    },
-    {
-      "epoch": 6.4,
-      "eval_loss": 0.1457582265138626,
-      "eval_runtime": 13.3297,
-      "eval_samples_per_second": 589.435,
-      "eval_steps_per_second": 36.91,
-      "step": 4000
-    },
-    {
-      "epoch": 7.2,
-      "grad_norm": 0.618806004524231,
-      "learning_rate": 0.00028000000000000003,
-      "loss": 0.1898,
-      "step": 4500
-    },
-    {
-      "epoch": 7.2,
-      "eval_loss": 0.14268401265144348,
-      "eval_runtime": 13.3194,
-      "eval_samples_per_second": 589.89,
-      "eval_steps_per_second": 36.939,
-      "step": 4500
-    },
-    {
-      "epoch": 8.0,
-      "grad_norm": 0.32574233412742615,
-      "learning_rate": 0.0002,
-      "loss": 0.1874,
-      "step": 5000
-    },
-    {
-      "epoch": 8.0,
-      "eval_loss": 0.14275924861431122,
-      "eval_runtime": 13.3109,
-      "eval_samples_per_second": 590.268,
-      "eval_steps_per_second": 36.962,
-      "step": 5000
-    },
-    {
-      "epoch": 8.8,
-      "grad_norm": 0.5372352600097656,
-      "learning_rate": 0.00012,
-      "loss": 0.1792,
-      "step": 5500
-    },
-    {
-      "epoch": 8.8,
-      "eval_loss": 0.13903391361236572,
-      "eval_runtime": 13.3118,
-      "eval_samples_per_second": 590.229,
-      "eval_steps_per_second": 36.96,
-      "step": 5500
-    },
-    {
-      "epoch": 9.6,
-      "grad_norm": 0.2961955964565277,
-      "learning_rate": 4e-05,
-      "loss": 0.1757,
-      "step": 6000
-    },
-    {
-      "epoch": 9.6,
-      "eval_loss": 0.13857147097587585,
-      "eval_runtime": 13.3572,
-      "eval_samples_per_second": 588.22,
-      "eval_steps_per_second": 36.834,
-      "step": 6000
-    },
-    {
-      "epoch": 10.0,
-      "step": 6250,
-      "total_flos": 3231326208000000.0,
-      "train_loss": 0.23933088928222657,
-      "train_runtime": 648.1335,
-      "train_samples_per_second": 154.289,
-      "train_steps_per_second": 9.643
+      "epoch": 0.0,
+      "step": 1,
+      "total_flos": 517012193280.0,
+      "train_loss": 2.5783498287200928,
+      "train_runtime": 0.3238,
+      "train_samples_per_second": 30.88,
+      "train_steps_per_second": 3.088
     }
   ],
   "logging_steps": 500,
-  "max_steps": 6250,
-  "num_input_tokens_seen": 0,
-  "num_train_epochs": 10,
+  "max_steps": 1,
+  "num_train_epochs": 1,
   "save_steps": 500,
-  "total_flos": 3231326208000000.0,
-  "train_batch_size": 16,
+  "total_flos": 517012193280.0,
   "trial_name": null,
   "trial_params": null
 }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:de0a6dccc98d554d66a76fdbd912807ac60d3ab3c7edc5013d73b41ae0605b67
-size 5048
+oid sha256:aa8ecf515d6c00e0bde4f518167dadc8c68220a2f8351cd581c44d0e29e7c706
+size 4856