lrei committed on
Commit
f1e3845
1 Parent(s): fdc1b83

Upload 8 files

Browse files
Files changed (4) hide show
  1. config.json +3 -3
  2. pytorch_model.bin +2 -2
  3. results.txt +41 -41
  4. tokenizer_config.json +1 -2
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "distilroberta-base",
3
  "architectures": [
4
  "RobertaForSequenceClassification"
5
  ],
@@ -96,12 +96,12 @@
96
  "max_position_embeddings": 514,
97
  "model_type": "roberta",
98
  "num_attention_heads": 12,
99
- "num_hidden_layers": 6,
100
  "pad_token_id": 1,
101
  "position_embedding_type": "absolute",
102
  "problem_type": "multi_label_classification",
103
  "torch_dtype": "float32",
104
- "transformers_version": "4.26.0",
105
  "type_vocab_size": 1,
106
  "use_cache": true,
107
  "vocab_size": 50265
 
1
  {
2
+ "_name_or_path": "roberta-base",
3
  "architectures": [
4
  "RobertaForSequenceClassification"
5
  ],
 
96
  "max_position_embeddings": 514,
97
  "model_type": "roberta",
98
  "num_attention_heads": 12,
99
+ "num_hidden_layers": 12,
100
  "pad_token_id": 1,
101
  "position_embedding_type": "absolute",
102
  "problem_type": "multi_label_classification",
103
  "torch_dtype": "float32",
104
+ "transformers_version": "4.28.1",
105
  "type_vocab_size": 1,
106
  "use_cache": true,
107
  "vocab_size": 50265
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:afe9fdc4cadd110c5535272f53141ffb9af7ff2ee16860eb1cd5b3db34a68a75
3
- size 328630645
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9183a055cc3328fb7f52ee9bc4a56fe3481d2812116ebe62e8094fe95e5f641
3
+ size 498772789
results.txt CHANGED
@@ -1,47 +1,47 @@
1
- {"f1_micro": 0.5894077448747151, "f1_macro": 0.5742826189045889, "p_macro": 0.5777223160436127, "p_micro": 0.583756345177665, "r_macro": 0.6338187993226557, "r_micro": 0.5951696377228292}
2
  precision recall f1-score support
3
 
4
- admiration 0.8049 0.3000 0.4371 110
5
- amusement 0.7377 0.8654 0.7965 52
6
- anger 0.7424 0.6806 0.7101 72
7
- annoyance 0.4557 0.6316 0.5294 57
8
- approval 0.8571 0.4898 0.6234 98
9
- boredom 0.6806 0.9245 0.7840 53
10
- calmness 0.6481 0.7778 0.7071 45
11
- caring 0.7761 0.8125 0.7939 64
12
- courage 0.5167 0.7381 0.6078 42
13
- curiosity 0.7714 0.8060 0.7883 67
14
- desire 0.7922 0.7531 0.7722 81
15
- despair 0.7111 0.7273 0.7191 44
16
- disappointment 0.4211 0.4103 0.4156 39
17
- disapproval 0.5357 0.2703 0.3593 111
18
- disgust 0.7059 0.3333 0.4528 72
19
- doubt 0.5938 0.4419 0.5067 43
20
- embarrassment 0.5769 0.6818 0.6250 22
21
- envy 0.3171 0.9286 0.4727 14
22
- excitement 0.5000 0.6316 0.5581 38
23
- faith 0.3846 0.7692 0.5128 13
24
- fear 0.4118 0.3590 0.3836 39
25
- frustration 0.5625 0.6667 0.6102 54
26
- gratitude 0.2647 0.6429 0.3750 14
27
- greed 0.6957 0.6400 0.6667 25
28
  grief 0.2553 0.8571 0.3934 14
29
- guilt 0.4500 0.6923 0.5455 13
30
- indifference 0.6889 0.8378 0.7561 37
31
- joy 0.8276 0.3934 0.5333 61
32
- love 0.6481 0.7000 0.6731 50
33
  nervousness 0.5217 0.5000 0.5106 24
34
- nostalgia 0.2342 0.8966 0.3714 29
35
- optimism 0.4815 0.3514 0.4062 37
36
- pain 0.3235 0.5000 0.3929 22
37
- pride 0.4815 0.4815 0.4815 27
38
- relief 0.5135 0.7600 0.6129 25
39
- sadness 0.6829 0.5385 0.6022 52
40
- surprise 0.7059 0.6667 0.6857 36
41
- trust 0.6750 0.6279 0.6506 43
42
 
43
- micro avg 0.5838 0.5952 0.5894 1739
44
- macro avg 0.5777 0.6338 0.5743 1739
45
- weighted avg 0.6399 0.5952 0.5882 1739
46
- samples avg 0.6329 0.6168 0.5754 1739
47
 
 
1
+ {"f1_micro": 0.6044294925707878, "f1_macro": 0.5869890898362741, "p_macro": 0.5789868714952907, "p_micro": 0.5897155361050328, "r_macro": 0.6578820391425064, "r_micro": 0.6198964922369178}
2
  precision recall f1-score support
3
 
4
+ admiration 0.7778 0.3182 0.4516 110
5
+ amusement 0.7031 0.8654 0.7759 52
6
+ anger 0.7246 0.6944 0.7092 72
7
+ annoyance 0.5250 0.7368 0.6131 57
8
+ approval 0.8333 0.5102 0.6329 98
9
+ boredom 0.6533 0.9245 0.7656 53
10
+ calmness 0.7200 0.8000 0.7579 45
11
+ caring 0.7826 0.8438 0.8120 64
12
+ courage 0.5574 0.8095 0.6602 42
13
+ curiosity 0.7746 0.8209 0.7971 67
14
+ desire 0.8125 0.8025 0.8075 81
15
+ despair 0.7857 0.7500 0.7674 44
16
+ disappointment 0.4186 0.4615 0.4390 39
17
+ disapproval 0.4915 0.2613 0.3412 111
18
+ disgust 0.7429 0.3611 0.4860 72
19
+ doubt 0.6071 0.3953 0.4789 43
20
+ embarrassment 0.4688 0.6818 0.5556 22
21
+ envy 0.3023 0.9286 0.4561 14
22
+ excitement 0.5417 0.6842 0.6047 38
23
+ faith 0.3704 0.7692 0.5000 13
24
+ fear 0.5143 0.4615 0.4865 39
25
+ frustration 0.5574 0.6296 0.5913 54
26
+ gratitude 0.2121 0.5000 0.2979 14
27
+ greed 0.5769 0.6000 0.5882 25
28
  grief 0.2553 0.8571 0.3934 14
29
+ guilt 0.4348 0.7692 0.5556 13
30
+ indifference 0.6667 0.8649 0.7529 37
31
+ joy 0.7647 0.4262 0.5474 61
32
+ love 0.6538 0.6800 0.6667 50
33
  nervousness 0.5217 0.5000 0.5106 24
34
+ nostalgia 0.2427 0.8621 0.3788 29
35
+ optimism 0.5517 0.4324 0.4848 37
36
+ pain 0.3421 0.5909 0.4333 22
37
+ pride 0.4848 0.5926 0.5333 27
38
+ relief 0.5000 0.8400 0.6269 25
39
+ sadness 0.6818 0.5769 0.6250 52
40
+ surprise 0.7222 0.7222 0.7222 36
41
+ trust 0.7250 0.6744 0.6988 43
42
 
43
+ micro avg 0.5897 0.6199 0.6044 1739
44
+ macro avg 0.5790 0.6579 0.5870 1739
45
+ weighted avg 0.6415 0.6199 0.6039 1739
46
+ samples avg 0.6462 0.6431 0.5964 1739
47
 
tokenizer_config.json CHANGED
@@ -1,15 +1,14 @@
1
  {
2
  "add_prefix_space": false,
3
  "bos_token": "<s>",
 
4
  "cls_token": "<s>",
5
  "eos_token": "</s>",
6
  "errors": "replace",
7
  "mask_token": "<mask>",
8
  "model_max_length": 512,
9
- "name_or_path": "distilroberta-base",
10
  "pad_token": "<pad>",
11
  "sep_token": "</s>",
12
- "special_tokens_map_file": null,
13
  "tokenizer_class": "RobertaTokenizer",
14
  "trim_offsets": true,
15
  "unk_token": "<unk>"
 
1
  {
2
  "add_prefix_space": false,
3
  "bos_token": "<s>",
4
+ "clean_up_tokenization_spaces": true,
5
  "cls_token": "<s>",
6
  "eos_token": "</s>",
7
  "errors": "replace",
8
  "mask_token": "<mask>",
9
  "model_max_length": 512,
 
10
  "pad_token": "<pad>",
11
  "sep_token": "</s>",
 
12
  "tokenizer_class": "RobertaTokenizer",
13
  "trim_offsets": true,
14
  "unk_token": "<unk>"