jamesbaskerville commited on
Commit
0a830b4
·
verified ·
1 Parent(s): e5dbba3

jamesbaskerville/classify-article-titles

Browse files
README.md ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ license: apache-2.0
4
+ base_model: albert/albert-base-v2
5
+ tags:
6
+ - generated_from_trainer
7
+ metrics:
8
+ - accuracy
9
+ - f1
10
+ - precision
11
+ - recall
12
+ model-index:
13
+ - name: classify-articles
14
+ results: []
15
+ ---
16
+
17
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
18
+ should probably proofread and complete it, then remove this comment. -->
19
+
20
+ # classify-articles
21
+
22
+ This model is a fine-tuned version of [albert/albert-base-v2](https://huggingface.co/albert/albert-base-v2) on an unspecified dataset.
23
+ It achieves the following results on the evaluation set:
24
+ - Loss: 0.3819
25
+ - Accuracy: 0.9070
26
+ - F1: 0.9061
27
+ - Precision: 0.9126
28
+ - Recall: 0.9070
29
+ - Accuracy Label Economy: 0.9429
30
+ - Accuracy Label Politics: 0.9574
31
+ - Accuracy Label Science: 0.9362
32
+ - Accuracy Label Sports: 0.9600
33
+ - Accuracy Label Technology: 0.6944
34
+
35
+ ## Model description
36
+
37
+ More information needed
38
+
39
+ ## Intended uses & limitations
40
+
41
+ More information needed
42
+
43
+ ## Training and evaluation data
44
+
45
+ More information needed
46
+
47
+ ## Training procedure
48
+
49
+ ### Training hyperparameters
50
+
51
+ The following hyperparameters were used during training:
52
+ - learning_rate: 2e-05
53
+ - train_batch_size: 16
54
+ - eval_batch_size: 16
55
+ - seed: 42
56
+ - gradient_accumulation_steps: 2
57
+ - total_train_batch_size: 32
58
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
59
+ - lr_scheduler_type: linear
60
+ - lr_scheduler_warmup_steps: 500
61
+ - num_epochs: 3
62
+
63
+ ### Training results
64
+
65
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | Accuracy Label Economy | Accuracy Label Politics | Accuracy Label Science | Accuracy Label Sports | Accuracy Label Technology |
66
+ |:-------------:|:------:|:----:|:---------------:|:--------:|:------:|:---------:|:------:|:----------------------:|:-----------------------:|:----------------------:|:---------------------:|:-------------------------:|
67
+ | 1.3703 | 1.3072 | 100 | 1.3775 | 0.4930 | 0.4238 | 0.6100 | 0.4930 | 0.8 | 0.0213 | 0.7021 | 0.72 | 0.2222 |
68
+ | 0.4329 | 2.6144 | 200 | 0.4495 | 0.8977 | 0.9004 | 0.9134 | 0.8977 | 0.9429 | 0.8936 | 0.9149 | 0.96 | 0.75 |
69
+
70
+
71
+ ### Framework versions
72
+
73
+ - Transformers 4.44.2
74
+ - Pytorch 2.4.1
75
+ - Datasets 2.21.0
76
+ - Tokenizers 0.19.1
config.json ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "albert/albert-base-v2",
3
+ "architectures": [
4
+ "AlbertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0,
7
+ "bos_token_id": 2,
8
+ "classifier_dropout_prob": 0.1,
9
+ "down_scale_factor": 1,
10
+ "embedding_size": 128,
11
+ "eos_token_id": 3,
12
+ "gap_size": 0,
13
+ "hidden_act": "gelu_new",
14
+ "hidden_dropout_prob": 0,
15
+ "hidden_size": 768,
16
+ "id2label": {
17
+ "0": "economy",
18
+ "1": "politics",
19
+ "2": "science",
20
+ "3": "sports",
21
+ "4": "technology"
22
+ },
23
+ "initializer_range": 0.02,
24
+ "inner_group_num": 1,
25
+ "intermediate_size": 3072,
26
+ "label2id": {
27
+ "economy": 0,
28
+ "politics": 1,
29
+ "science": 2,
30
+ "sports": 3,
31
+ "technology": 4
32
+ },
33
+ "layer_norm_eps": 1e-12,
34
+ "max_position_embeddings": 512,
35
+ "model_type": "albert",
36
+ "net_structure_type": 0,
37
+ "num_attention_heads": 12,
38
+ "num_hidden_groups": 1,
39
+ "num_hidden_layers": 12,
40
+ "num_memory_blocks": 0,
41
+ "pad_token_id": 0,
42
+ "position_embedding_type": "absolute",
43
+ "problem_type": "single_label_classification",
44
+ "torch_dtype": "float32",
45
+ "transformers_version": "4.44.2",
46
+ "type_vocab_size": 2,
47
+ "vocab_size": 30000
48
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7bf37bf834442ed64e7594dc101754a1a27db9fc60e46c578dae2c6a95326d59
3
+ size 46753140
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "[CLS]",
3
+ "cls_token": "[CLS]",
4
+ "eos_token": "[SEP]",
5
+ "mask_token": {
6
+ "content": "[MASK]",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "[SEP]",
14
+ "unk_token": "<unk>"
15
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fefb02b667a6c5c2fe27602d28e5fb3428f66ab89c7d6f388e7c8d44a02d0336
3
+ size 760289
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<pad>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<unk>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "[MASK]",
37
+ "lstrip": true,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "bos_token": "[CLS]",
45
+ "clean_up_tokenization_spaces": true,
46
+ "cls_token": "[CLS]",
47
+ "do_lower_case": true,
48
+ "eos_token": "[SEP]",
49
+ "keep_accents": false,
50
+ "mask_token": "[MASK]",
51
+ "model_max_length": 512,
52
+ "pad_token": "<pad>",
53
+ "remove_space": true,
54
+ "sep_token": "[SEP]",
55
+ "sp_model_kwargs": {},
56
+ "tokenizer_class": "AlbertTokenizer",
57
+ "unk_token": "<unk>"
58
+ }
trainer_state.json ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.980392156862745,
5
+ "eval_steps": 100,
6
+ "global_step": 228,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.13071895424836602,
13
+ "grad_norm": 17.214561462402344,
14
+ "learning_rate": 4.0000000000000003e-07,
15
+ "loss": 1.6592,
16
+ "step": 10
17
+ },
18
+ {
19
+ "epoch": 0.26143790849673204,
20
+ "grad_norm": 12.780923843383789,
21
+ "learning_rate": 8.000000000000001e-07,
22
+ "loss": 1.6322,
23
+ "step": 20
24
+ },
25
+ {
26
+ "epoch": 0.39215686274509803,
27
+ "grad_norm": 30.824169158935547,
28
+ "learning_rate": 1.2000000000000002e-06,
29
+ "loss": 1.5926,
30
+ "step": 30
31
+ },
32
+ {
33
+ "epoch": 0.5228758169934641,
34
+ "grad_norm": 17.90578842163086,
35
+ "learning_rate": 1.6000000000000001e-06,
36
+ "loss": 1.5943,
37
+ "step": 40
38
+ },
39
+ {
40
+ "epoch": 0.6535947712418301,
41
+ "grad_norm": 17.254802703857422,
42
+ "learning_rate": 2.0000000000000003e-06,
43
+ "loss": 1.5476,
44
+ "step": 50
45
+ },
46
+ {
47
+ "epoch": 0.7843137254901961,
48
+ "grad_norm": 23.29121971130371,
49
+ "learning_rate": 2.4000000000000003e-06,
50
+ "loss": 1.5345,
51
+ "step": 60
52
+ },
53
+ {
54
+ "epoch": 0.9150326797385621,
55
+ "grad_norm": 21.84389305114746,
56
+ "learning_rate": 2.8000000000000003e-06,
57
+ "loss": 1.5421,
58
+ "step": 70
59
+ },
60
+ {
61
+ "epoch": 1.0457516339869282,
62
+ "grad_norm": 24.690324783325195,
63
+ "learning_rate": 3.2000000000000003e-06,
64
+ "loss": 1.4884,
65
+ "step": 80
66
+ },
67
+ {
68
+ "epoch": 1.1764705882352942,
69
+ "grad_norm": 25.043909072875977,
70
+ "learning_rate": 3.6000000000000003e-06,
71
+ "loss": 1.4151,
72
+ "step": 90
73
+ },
74
+ {
75
+ "epoch": 1.3071895424836601,
76
+ "grad_norm": 25.61355209350586,
77
+ "learning_rate": 4.000000000000001e-06,
78
+ "loss": 1.3703,
79
+ "step": 100
80
+ },
81
+ {
82
+ "epoch": 1.3071895424836601,
83
+ "eval_accuracy": 0.4930232558139535,
84
+ "eval_accuracy_label_economy": 0.8,
85
+ "eval_accuracy_label_politics": 0.02127659574468085,
86
+ "eval_accuracy_label_science": 0.7021276595744681,
87
+ "eval_accuracy_label_sports": 0.72,
88
+ "eval_accuracy_label_technology": 0.2222222222222222,
89
+ "eval_f1": 0.42383332698769244,
90
+ "eval_loss": 1.377465844154358,
91
+ "eval_precision": 0.6099528142905292,
92
+ "eval_recall": 0.4930232558139535,
93
+ "eval_runtime": 0.9308,
94
+ "eval_samples_per_second": 230.989,
95
+ "eval_steps_per_second": 15.041,
96
+ "step": 100
97
+ },
98
+ {
99
+ "epoch": 1.4379084967320261,
100
+ "grad_norm": 25.733768463134766,
101
+ "learning_rate": 4.4e-06,
102
+ "loss": 1.3219,
103
+ "step": 110
104
+ },
105
+ {
106
+ "epoch": 1.5686274509803921,
107
+ "grad_norm": 19.24130630493164,
108
+ "learning_rate": 4.800000000000001e-06,
109
+ "loss": 1.2374,
110
+ "step": 120
111
+ },
112
+ {
113
+ "epoch": 1.6993464052287581,
114
+ "grad_norm": 20.945722579956055,
115
+ "learning_rate": 5.2e-06,
116
+ "loss": 1.1398,
117
+ "step": 130
118
+ },
119
+ {
120
+ "epoch": 1.8300653594771243,
121
+ "grad_norm": 18.362749099731445,
122
+ "learning_rate": 5.600000000000001e-06,
123
+ "loss": 1.049,
124
+ "step": 140
125
+ },
126
+ {
127
+ "epoch": 1.9607843137254903,
128
+ "grad_norm": 21.393177032470703,
129
+ "learning_rate": 6e-06,
130
+ "loss": 0.9089,
131
+ "step": 150
132
+ },
133
+ {
134
+ "epoch": 2.0915032679738563,
135
+ "grad_norm": 20.148643493652344,
136
+ "learning_rate": 6.4000000000000006e-06,
137
+ "loss": 0.7627,
138
+ "step": 160
139
+ },
140
+ {
141
+ "epoch": 2.2222222222222223,
142
+ "grad_norm": 37.801212310791016,
143
+ "learning_rate": 6.800000000000001e-06,
144
+ "loss": 0.5946,
145
+ "step": 170
146
+ },
147
+ {
148
+ "epoch": 2.3529411764705883,
149
+ "grad_norm": 22.492183685302734,
150
+ "learning_rate": 7.2000000000000005e-06,
151
+ "loss": 0.6173,
152
+ "step": 180
153
+ },
154
+ {
155
+ "epoch": 2.4836601307189543,
156
+ "grad_norm": 58.739967346191406,
157
+ "learning_rate": 7.600000000000001e-06,
158
+ "loss": 0.4892,
159
+ "step": 190
160
+ },
161
+ {
162
+ "epoch": 2.6143790849673203,
163
+ "grad_norm": 31.867156982421875,
164
+ "learning_rate": 8.000000000000001e-06,
165
+ "loss": 0.4329,
166
+ "step": 200
167
+ },
168
+ {
169
+ "epoch": 2.6143790849673203,
170
+ "eval_accuracy": 0.8976744186046511,
171
+ "eval_accuracy_label_economy": 0.9428571428571428,
172
+ "eval_accuracy_label_politics": 0.8936170212765957,
173
+ "eval_accuracy_label_science": 0.9148936170212766,
174
+ "eval_accuracy_label_sports": 0.96,
175
+ "eval_accuracy_label_technology": 0.75,
176
+ "eval_f1": 0.9004308622967181,
177
+ "eval_loss": 0.4494597017765045,
178
+ "eval_precision": 0.9133808113740947,
179
+ "eval_recall": 0.8976744186046511,
180
+ "eval_runtime": 0.5691,
181
+ "eval_samples_per_second": 377.764,
182
+ "eval_steps_per_second": 24.599,
183
+ "step": 200
184
+ },
185
+ {
186
+ "epoch": 2.7450980392156863,
187
+ "grad_norm": 16.43819808959961,
188
+ "learning_rate": 8.400000000000001e-06,
189
+ "loss": 0.3908,
190
+ "step": 210
191
+ },
192
+ {
193
+ "epoch": 2.8758169934640523,
194
+ "grad_norm": 8.563356399536133,
195
+ "learning_rate": 8.8e-06,
196
+ "loss": 0.3735,
197
+ "step": 220
198
+ },
199
+ {
200
+ "epoch": 2.980392156862745,
201
+ "step": 228,
202
+ "total_flos": 9501472156644.0,
203
+ "train_loss": 1.0989631905890347,
204
+ "train_runtime": 56.2949,
205
+ "train_samples_per_second": 129.71,
206
+ "train_steps_per_second": 4.05
207
+ },
208
+ {
209
+ "epoch": 2.980392156862745,
210
+ "eval_accuracy": 0.9069767441860465,
211
+ "eval_accuracy_label_economy": 0.9428571428571428,
212
+ "eval_accuracy_label_politics": 0.9574468085106383,
213
+ "eval_accuracy_label_science": 0.9361702127659575,
214
+ "eval_accuracy_label_sports": 0.96,
215
+ "eval_accuracy_label_technology": 0.6944444444444444,
216
+ "eval_f1": 0.9060619059788495,
217
+ "eval_loss": 0.38190317153930664,
218
+ "eval_precision": 0.9125925762408804,
219
+ "eval_recall": 0.9069767441860465,
220
+ "eval_runtime": 0.626,
221
+ "eval_samples_per_second": 343.446,
222
+ "eval_steps_per_second": 22.364,
223
+ "step": 228
224
+ }
225
+ ],
226
+ "logging_steps": 10,
227
+ "max_steps": 228,
228
+ "num_input_tokens_seen": 0,
229
+ "num_train_epochs": 3,
230
+ "save_steps": 1000,
231
+ "stateful_callbacks": {
232
+ "TrainerControl": {
233
+ "args": {
234
+ "should_epoch_stop": false,
235
+ "should_evaluate": false,
236
+ "should_log": false,
237
+ "should_save": true,
238
+ "should_training_stop": true
239
+ },
240
+ "attributes": {}
241
+ }
242
+ },
243
+ "total_flos": 9501472156644.0,
244
+ "train_batch_size": 16,
245
+ "trial_name": null,
246
+ "trial_params": null
247
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b7addff3ab260590f7c38e75fee14524a056b7ecc31ded451903532746931f8
3
+ size 5176