anzeo committed
Commit 4c29a5b
1 Parent(s): 6b56823

fine_tuned_rte_sloberta
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-license: cc-by-sa-4.0
-base_model: EMBEDDIA/sloberta
+license: mit
+base_model: xlm-roberta-base
 tags:
 - generated_from_trainer
 metrics:
@@ -16,11 +16,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 # fine_tuned_rte_sloberta
 
-This model is a fine-tuned version of [EMBEDDIA/sloberta](https://huggingface.co/EMBEDDIA/sloberta) on an unknown dataset.
+This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6653
+- Loss: 1.4763
 - Accuracy: 0.6207
-- F1: 0.5750
+- F1: 0.5951
 
 ## Model description
 
@@ -51,14 +51,14 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch   | Step | Validation Loss | Accuracy | F1     |
 |:-------------:|:-------:|:----:|:---------------:|:--------:|:------:|
-| 0.6985        | 1.7241  | 50   | 0.7540          | 0.4138   | 0.2422 |
-| 0.7058        | 3.4483  | 100  | 0.6912          | 0.5862   | 0.4333 |
-| 0.6993        | 5.1724  | 150  | 0.6980          | 0.4138   | 0.2422 |
-| 0.7003        | 6.8966  | 200  | 0.6806          | 0.5862   | 0.4333 |
-| 0.6968        | 8.6207  | 250  | 0.6730          | 0.5862   | 0.4333 |
-| 0.6736        | 10.3448 | 300  | 0.6726          | 0.6897   | 0.6801 |
-| 0.6339        | 12.0690 | 350  | 0.6580          | 0.6207   | 0.6090 |
-| 0.6005        | 13.7931 | 400  | 0.6653          | 0.6207   | 0.5750 |
+| 0.7117        | 1.7241  | 50   | 0.7129          | 0.4138   | 0.2422 |
+| 0.7033        | 3.4483  | 100  | 0.6997          | 0.4138   | 0.2422 |
+| 0.6845        | 5.1724  | 150  | 0.6933          | 0.4828   | 0.4828 |
+| 0.6378        | 6.8966  | 200  | 0.8005          | 0.4828   | 0.4668 |
+| 0.4579        | 8.6207  | 250  | 0.9656          | 0.6207   | 0.5951 |
+| 0.2521        | 10.3448 | 300  | 1.2302          | 0.6552   | 0.6018 |
+| 0.1196        | 12.0690 | 350  | 1.4679          | 0.5862   | 0.5789 |
+| 0.0653        | 13.7931 | 400  | 1.4763          | 0.6207   | 0.5951 |
 
 
 ### Framework versions
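For reference, a minimal sketch of how the updated checkpoint could be loaded and queried for an RTE-style (premise/hypothesis) prediction. The repo id `anzeo/fine_tuned_rte_sloberta`, the example sentence pair, and the label mapping are assumptions for illustration only; they are not confirmed by this diff.

```python
# Minimal sketch: load the fine-tuned checkpoint and score one premise/hypothesis pair.
# Assumptions: the repo id below and the label names are illustrative; check the
# model's config.json (id2label) for the actual mapping.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "anzeo/fine_tuned_rte_sloberta"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

premise = "Pariz je glavno mesto Francije."   # illustrative example pair
hypothesis = "Pariz je v Franciji."
inputs = tokenizer(premise, hypothesis, return_tensors="pt", truncation=True)

with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()
print(pred, model.config.id2label.get(pred, pred))
```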
config.json CHANGED
@@ -1,13 +1,12 @@
 {
-  "_name_or_path": "EMBEDDIA/sloberta",
+  "_name_or_path": "xlm-roberta-base",
   "architectures": [
-    "CamembertForSequenceClassification"
+    "XLMRobertaForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "bos_token_id": 0,
   "classifier_dropout": null,
   "eos_token_id": 2,
-  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
@@ -23,9 +22,10 @@
   },
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
-  "model_type": "camembert",
+  "model_type": "xlm-roberta",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
+  "output_past": true,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
@@ -33,5 +33,5 @@
   "transformers_version": "4.40.2",
   "type_vocab_size": 1,
   "use_cache": false,
-  "vocab_size": 32005
+  "vocab_size": 250002
 }
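As a sanity check, a sketch of how a configuration equivalent to the new config.json could be rebuilt from the base model. The value `num_labels=2` is assumed from the two-class RTE setup and is not stated in this diff.

```python
# Sketch: rebuild a classification config equivalent to the new config.json.
# Assumption: num_labels=2 (entailment vs. not_entailment) for the RTE task.
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained(
    "xlm-roberta-base",
    num_labels=2,
    problem_type="single_label_classification",
)
print(config.model_type)   # xlm-roberta
print(config.vocab_size)   # 250002, vs. 32005 for EMBEDDIA/sloberta

# Instantiating from this config keeps the pretrained encoder weights and adds
# a randomly initialised classification head on top.
model = AutoModelForSequenceClassification.from_pretrained("xlm-roberta-base", config=config)
```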
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29cb0af14d650db1d9f022cffc1845fc4ee8af8e2c6bd824f93d035c81ac5f10
-size 442518104
+oid sha256:5eabd24fea607f51753c670732cbff70b9cf98944aaa48ee857d18940d30c1c9
+size 1112205008
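The roughly 2.5× larger checkpoint is consistent with the much larger vocabulary: in fp32, the 250 002 × 768 XLM-R embedding matrix alone is about 768 MB, versus roughly 98 MB for SloBERTa's 32 005 × 768 matrix. A quick back-of-the-envelope check (figures approximate):

```python
# Rough check: how much of the checkpoint size difference the embedding matrix explains.
hidden = 768
fp32 = 4  # bytes per parameter

xlmr_embed = 250_002 * hidden * fp32     # ≈ 768 MB
sloberta_embed = 32_005 * hidden * fp32  # ≈  98 MB
print(f"XLM-R embeddings:    {xlmr_embed / 1e6:.0f} MB")
print(f"SloBERTa embeddings: {sloberta_embed / 1e6:.0f} MB")
print(f"difference:          {(xlmr_embed - sloberta_embed) / 1e6:.0f} MB")
# Observed checkpoint sizes: 1 112 205 008 B vs. 442 518 104 B, a difference of ~670 MB,
# which matches the ~670 MB embedding-matrix difference almost exactly.
```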
runs/May22_19-18-31_DESKTOP-22QTFDR/events.out.tfevents.1716398312.DESKTOP-22QTFDR.19492.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a0be47c1bdcc7a74bba35cd0aa128b18b0e1b782a27fafd330b55e18f3fdfa8
+size 9842
runs/May22_19-18-31_DESKTOP-22QTFDR/events.out.tfevents.1716398560.DESKTOP-22QTFDR.19492.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c818036a42d96e675cab730bcfc6918b5e012088f9c7c3a285fc357ee15ce3fd
+size 457
sentencepiece.bpe.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:34b589385a2320549143ab23b0ccf82cc99a82685701cdabe0fad847bd0479ff
-size 800013
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json CHANGED
@@ -1,55 +1,15 @@
 {
-  "additional_special_tokens": [
-    "<s>NOTUSED",
-    "</s>NOTUSED"
-  ],
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
   "mask_token": {
     "content": "<mask>",
     "lstrip": true,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<pad>",
-    "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "sep_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
 }
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "added_tokens_decoder": {
     "0": {
-      "content": "<s>NOTUSED",
+      "content": "<s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -17,7 +17,7 @@
       "special": true
     },
     "2": {
-      "content": "</s>NOTUSED",
+      "content": "</s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -32,45 +32,23 @@
       "single_word": false,
       "special": true
     },
-    "5": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "6": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "32004": {
+    "250001": {
       "content": "<mask>",
       "lstrip": true,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
-  "additional_special_tokens": [
-    "<s>NOTUSED",
-    "</s>NOTUSED"
-  ],
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
-  "do_lower_case": false,
   "eos_token": "</s>",
   "mask_token": "<mask>",
   "model_max_length": 512,
   "pad_token": "<pad>",
   "sep_token": "</s>",
-  "sp_model_kwargs": {},
-  "tokenizer_class": "CamembertTokenizer",
+  "tokenizer_class": "XLMRobertaTokenizer",
   "unk_token": "<unk>"
 }
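A small sketch of how the tokenizer swap could be verified. It loads `xlm-roberta-base` directly as a stand-in (an assumption; the fine-tuned repo should expose the same special-token layout).

```python
# Sketch: confirm the tokenizer now follows the XLM-R layout described above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
print(type(tok).__name__)   # XLMRobertaTokenizer(Fast), matching "tokenizer_class"
print(tok.mask_token_id)    # 250001 — the added_tokens_decoder key shown above
print(len(tok))             # expected 250002, matching "vocab_size" in config.json
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)  # <s> </s> <pad> <unk>
```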
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bc80e643402c7ee6d9d8ca8f6132e33e609f3728a236a07b663529646f673f88
+oid sha256:36dd1dd9f5c346ceaa1b3d7782066fd60c5d48c515089da4fb6079288a85a7c7
 size 5048