albertmartinez committed
Commit 474bb70 · 1 Parent(s): 42db30a

Update README.md

Files changed (5)
  1. README.md +36 -0
  2. config.json +1 -1
  3. model.safetensors +1 -1
  4. tokenizer_config.json +2 -0
  5. training_args.bin +1 -1
README.md CHANGED
@@ -1,3 +1,39 @@
  ---
  license: mit
+ metrics:
+ - accuracy
+ - precision
+ - recall
+ - f1
+ datasets:
+ - albertmartinez/OSDG
+ pipeline_tag: text-classification
+ widget:
+ - text: "Between the Social and the Spatial - Exploring Multiple Dimensions of Poverty and Social Exclusion, Ashgate. Poverty in Europe and the USA, Exchanging Official Measurement Methods”, Maastricht Graduate School of Governance Working Paper 2007/005. Monitoring Absolute and Relative Poverty, ‘Not Enough’ Is Not the Same as ‘Much Less’”, Review of Income and Wealth, 57(2), 247-269. Poverty and Social Exclusion in Britain, The Policy Press, Bristol."
+ - text: "A circular economy is a way of achieving sustainable consumption and production, as well as nature positive outcomes."
  ---
+
+ # albertmartinez/bert-sdg-classification
+
+ This BERT model classifies text with respect to the United Nations Sustainable Development Goals (SDGs).
+
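+ A minimal inference sketch using the Transformers `pipeline` API (the exact label names returned depend on the label mapping stored in this repository's `config.json`):
+
+ ```python
+ from transformers import pipeline
+
+ # Load the text-classification pipeline directly from the Hub.
+ classifier = pipeline(
+     "text-classification",
+     model="albertmartinez/bert-sdg-classification",
+ )
+
+ # Example sentence taken from the widget examples above.
+ text = "A circular economy is a way of achieving sustainable consumption and production, as well as nature positive outcomes."
+ print(classifier(text))
+ ```
+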
+ ## Training Hyperparameters
+
+ - Number of epochs = 10
+ - Learning rate = 5e-5
+ - Batch size = 16
+
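+ For illustration only, these settings roughly correspond to the following Transformers `TrainingArguments`; the output directory and per-epoch evaluation are assumptions (consistent with the per-epoch results below), and the actual training script is not included in this repository:
+
+ ```python
+ from transformers import TrainingArguments
+
+ # Hypothetical sketch: only the three hyperparameters listed above come from this model card.
+ training_args = TrainingArguments(
+     output_dir="bert-sdg-classification",  # assumed output path
+     num_train_epochs=10,
+     learning_rate=5e-5,
+     per_device_train_batch_size=16,
+     evaluation_strategy="epoch",           # matches the per-epoch eval table below
+ )
+ ```
+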
+ ### Training results
+
+ | epoch | eval_loss | eval_accuracy | eval_precision | eval_recall | eval_f1 |
+ |:-----:|:------------------:|:------------------:|:------------------:|:------------------:|:------------------:|
+ | 1 | 0.8289520740509033 | 0.7644437495113752 | 0.7640369944809821 | 0.7644437495113752 | 0.7554162181382816 |
+ | 2 | 0.7316043972969055 | 0.792432178875772 | 0.7973419054011932 | 0.792432178875772 | 0.7936523802626467 |
+ | 3 | 0.7474315762519836 | 0.7989992963802673 | 0.8003484834993271 | 0.7989992963802673 | 0.7980647892639322 |
+ | 4 | 0.9092283248901367 | 0.8023610351028067 | 0.8028028170382215 | 0.8023610351028067 | 0.8010556735181147 |
+ | 5 | 1.0973293781280518 | 0.8040028144789305 | 0.806116786873114 | 0.8040028144789305 | 0.8037135940426907 |
+ | 6 | 1.2260032892227173 | 0.8032210147760144 | 0.8046046540363118 | 0.8032210147760144 | 0.8009496362737498 |
+ | 7 | 1.3465653657913208 | 0.8082245328746775 | 0.8079189056438383 | 0.8082245328746775 | 0.8070935517356475 |
+ | 8 | 1.458662509918213 | 0.8132280509733406 | 0.8124031757212116 | 0.8132280509733406 | 0.8124964838774498 |
+ | 9 | 1.5251907110214233 | 0.8108044718943007 | 0.8112362484949358 | 0.8108044718943007 | 0.8097338645156864 |
+ | 10 | 1.50314199924469 | 0.8152607302009225 | 0.8143774938584517 | 0.8152607302009225 | 0.8144630791491494 |
config.json CHANGED
@@ -57,7 +57,7 @@
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "torch_dtype": "float32",
- "transformers_version": "4.39.3",
+ "transformers_version": "4.40.0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f4b8b899ad9218de9ecf035d7c71e123e65ef5daf691d889385af67051f2dbc5
+ oid sha256:695cd9d9f7409cdda8f1a6da05fdfe45451f2d53896885a42b969372042cdfc6
  size 438001712
tokenizer_config.json CHANGED
@@ -43,9 +43,11 @@
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
  "do_lower_case": true,
  "mask_token": "[MASK]",
  "model_max_length": 512,
+ "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bd9cb1264e67f702e8745f49d7df47ef2b912b0ce0f17df28712480686dac7ac
+ oid sha256:24c8bb3148185237ef9953e1e36839fefdb6c1c6db44cb944a2e45bc12592f84
  size 4920