ShynBui committed 6a887f5 · verified · 1 parent: 34527d0

End of training
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
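
The added rule routes `tokenizer.json` through Git LFS, so a clone without LFS support receives a small pointer stub instead of the 17 MB file (the `version https://git-lfs.github.com/spec/v1` stanzas in the `ADDED` files below are exactly those stubs). A minimal detection sketch, assuming only the published pointer format:

```python
from pathlib import Path

LFS_SIGNATURE = "version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    """Return True if `path` holds a Git LFS pointer stub rather than real data."""
    p = Path(path)
    # Pointer files are tiny, so a size check cheaply rules out real blobs.
    if not p.is_file() or p.stat().st_size > 1024:
        return False
    lines = p.read_text(errors="ignore").splitlines()
    return bool(lines) and lines[0].strip() == LFS_SIGNATURE

# Example: catch a stub before handing it to a JSON parser.
if is_lfs_pointer("tokenizer.json"):
    print("tokenizer.json is an LFS pointer; run `git lfs pull` first.")
```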
README.md ADDED
@@ -0,0 +1,80 @@
+ ---
+ library_name: transformers
+ license: mit
+ base_model: FacebookAI/xlm-roberta-large
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: xlm-roberta-large_product_classifier
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # xlm-roberta-large_product_classifier
+
+ This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.3981
+ - Accuracy: 0.8169
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 42
+ - optimizer: AdamW (torch) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 20
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | No log | 1.0 | 490 | 0.8869 | 0.7423 |
+ | 1.3297 | 2.0 | 980 | 0.7796 | 0.7798 |
+ | 0.7265 | 3.0 | 1470 | 0.7592 | 0.7872 |
+ | 0.5509 | 4.0 | 1960 | 0.8112 | 0.7949 |
+ | 0.4258 | 5.0 | 2450 | 0.8498 | 0.7875 |
+ | 0.3307 | 6.0 | 2940 | 0.8326 | 0.8036 |
+ | 0.2702 | 7.0 | 3430 | 0.8833 | 0.8066 |
+ | 0.2078 | 8.0 | 3920 | 0.9260 | 0.8066 |
+ | 0.1571 | 9.0 | 4410 | 0.9800 | 0.8087 |
+ | 0.1242 | 10.0 | 4900 | 1.0725 | 0.8043 |
+ | 0.0962 | 11.0 | 5390 | 1.2147 | 0.7946 |
+ | 0.0857 | 12.0 | 5880 | 1.1705 | 0.8123 |
+ | 0.0667 | 13.0 | 6370 | 1.2551 | 0.8041 |
+ | 0.052 | 14.0 | 6860 | 1.2762 | 0.8184 |
+ | 0.0414 | 15.0 | 7350 | 1.3442 | 0.8115 |
+ | 0.0313 | 16.0 | 7840 | 1.3510 | 0.8130 |
+ | 0.0247 | 17.0 | 8330 | 1.3754 | 0.8133 |
+ | 0.0158 | 18.0 | 8820 | 1.3915 | 0.8135 |
+ | 0.0162 | 19.0 | 9310 | 1.3975 | 0.8186 |
+ | 0.0109 | 20.0 | 9800 | 1.3981 | 0.8169 |
+
+
+ ### Framework versions
+
+ - Transformers 4.48.0
+ - Pytorch 2.5.1+cu124
+ - Datasets 2.21.0
+ - Tokenizers 0.21.0
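
Two hedged sketches to accompany the card. First, the hyperparameter list above maps onto `transformers.TrainingArguments` roughly as below; `output_dir` and the per-epoch evaluation cadence are assumptions (the card reports one eval row per epoch but does not state the setting):

```python
from transformers import TrainingArguments

# Sketch reconstructing the card's hyperparameters; output_dir and
# eval_strategy are assumptions, everything else is taken from the card.
args = TrainingArguments(
    output_dir="xlm-roberta-large_product_classifier",
    learning_rate=2e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    optim="adamw_torch",        # AdamW with betas=(0.9, 0.999), epsilon=1e-08
    lr_scheduler_type="linear",
    num_train_epochs=20,
    eval_strategy="epoch",
)
```

Second, a minimal inference sketch. The repo id is inferred from the committer and model name, and the product title is invented; treat both as assumptions:

```python
from transformers import pipeline

# Assumed repo id (committer + model name); adjust to the actual namespace.
classifier = pipeline(
    "text-classification",
    model="ShynBui/xlm-roberta-large_product_classifier",
)

# Hypothetical Vietnamese product title ("men's stretch cotton t-shirt").
print(classifier("Áo thun nam cotton co giãn"))
# -> [{'label': 'Thời Trang Nam', 'score': ...}]; labels come from config.json below
```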
config.json ADDED
@@ -0,0 +1,89 @@
+ {
+   "_name_or_path": "FacebookAI/xlm-roberta-large",
+   "architectures": [
+     "XLMRobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "Thời Trang Nam",
+     "1": "Thời Trang Nữ",
+     "2": "Thời Trang Trẻ Em",
+     "3": "Giày Dép Nữ",
+     "4": "Giày Dép Nam",
+     "5": "Túi Ví Nữ",
+     "6": "Balo & Túi Ví Nam",
+     "7": "Phụ Kiện & Trang Sức Nữ",
+     "8": "Điện Thoại & Phụ Kiện",
+     "9": "Thiết Bị Điện Tử",
+     "10": "Máy Tính & Laptop",
+     "11": "Máy Ảnh & Máy Quay Phim",
+     "12": "Thiết Bị Điện Gia Dụng",
+     "13": "Nhà Cửa & Đời Sống",
+     "14": "Giặt Giũ & Chăm Sóc Nhà Cửa",
+     "15": "Dụng Cụ & Thiết Bị Tiện Ích",
+     "16": "Sắc Đẹp",
+     "17": "Sức Khỏe",
+     "18": "Mẹ & Bé",
+     "19": "Đồ Chơi",
+     "20": "Thể Thao & Du Lịch",
+     "21": "Ô Tô",
+     "22": "Xe Máy",
+     "23": "Xe Đạp",
+     "24": "Bách Hóa Online",
+     "25": "Voucher & Dịch Vụ",
+     "26": "Nhà Sách Online",
+     "27": "Chăm Sóc Thú Cưng"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "Balo & Túi Ví Nam": 6,
+     "Bách Hóa Online": 24,
+     "Chăm Sóc Thú Cưng": 27,
+     "Dụng Cụ & Thiết Bị Tiện Ích": 15,
+     "Giày Dép Nam": 4,
+     "Giày Dép Nữ": 3,
+     "Giặt Giũ & Chăm Sóc Nhà Cửa": 14,
+     "Máy Tính & Laptop": 10,
+     "Máy Ảnh & Máy Quay Phim": 11,
+     "Mẹ & Bé": 18,
+     "Nhà Cửa & Đời Sống": 13,
+     "Nhà Sách Online": 26,
+     "Phụ Kiện & Trang Sức Nữ": 7,
+     "Sắc Đẹp": 16,
+     "Sức Khỏe": 17,
+     "Thiết Bị Điện Gia Dụng": 12,
+     "Thiết Bị Điện Tử": 9,
+     "Thể Thao & Du Lịch": 20,
+     "Thời Trang Nam": 0,
+     "Thời Trang Nữ": 1,
+     "Thời Trang Trẻ Em": 2,
+     "Túi Ví Nữ": 5,
+     "Voucher & Dịch Vụ": 25,
+     "Xe Máy": 22,
+     "Xe Đạp": 23,
+     "Ô Tô": 21,
+     "Điện Thoại & Phụ Kiện": 8,
+     "Đồ Chơi": 19
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
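
The `id2label`/`label2id` tables are what turn the model's 28-way logit vector into one of these Shopee-style category names. A sketch of the mapping done by hand, assuming a local checkout of this commit (swap in the hub repo id if preferred); the sample title is hypothetical:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained(".")
tokenizer = AutoTokenizer.from_pretrained(".")

inputs = tokenizer("Ốp lưng iPhone 15 chống sốc", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits        # shape (1, 28): one logit per category
pred_id = logits.argmax(dim=-1).item()
print(model.config.id2label[pred_id])      # e.g. "Điện Thoại & Phụ Kiện"
```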
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:059ca498e603aeebc26b26cb83d7cc5de36752936034492cd04f0480900b6797
+ size 2239725272
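
Each three-line stanza like the one above is a Git LFS pointer: the repo stores only an object id and size, and `git lfs pull` (or `huggingface_hub`) fetches the 2.2 GB blob. A sketch that checks a downloaded blob against its pointer text (Python 3.9+ for `removeprefix`):

```python
import hashlib
from pathlib import Path

def pointer_fields(pointer_text: str) -> dict:
    """Parse the 'key value' lines of a Git LFS pointer."""
    return dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())

def matches_pointer(blob_path: str, pointer_text: str) -> bool:
    fields = pointer_fields(pointer_text)
    if Path(blob_path).stat().st_size != int(fields["size"]):
        return False
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == fields["oid"].removeprefix("sha256:")
```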
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
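
`sentencepiece.bpe.model` is the raw SentencePiece model behind the slow `XLMRobertaTokenizer`, and it loads standalone with the `sentencepiece` package. A sketch, assuming the file has been pulled from LFS; the sample text and pieces are illustrative:

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="sentencepiece.bpe.model")

# Raw piece count; config.json's vocab_size of 250002 additionally counts
# <mask> and the fairseq index offset layered on top by the HF tokenizer.
print(sp.get_piece_size())
print(sp.encode("Giày thể thao nam", out_type=str))  # subword pieces, e.g. ['▁Giày', ...]
```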
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
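
These are XLM-R's RoBERTa-style specials: `<s>` doubles as BOS/CLS, `</s>` as EOS/SEP, and `<mask>` keeps `lstrip: true` so it absorbs the space before it. A sketch of where they land in an encoded pair, assuming a local checkout; the inputs and pieces are illustrative:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")

# Single sequences become <s> ... </s>; pairs get a doubled </s> separator.
ids = tok("quần jean nữ", "size 27").input_ids
print(tok.convert_ids_to_tokens(ids))
# e.g. ['<s>', '▁quần', '▁jean', '▁nữ', '</s>', '</s>', '▁size', '▁27', '</s>']
```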
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a514807cffabd8abaf028cfaffe7ff0c4f60b97ea2db80c41f14172ae6b018ca
+ size 17082987
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "250001": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
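
`model_max_length: 512` is what makes `truncation=True` safe: the encoder's 514 position slots minus the two indices RoBERTa-style models reserve for the padding offset. A sketch, again assuming a local checkout:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")

long_title = "áo khoác " * 600          # hypothetical text far past the limit
enc = tok(long_title, truncation=True)  # clamps to model_max_length
print(len(enc.input_ids))               # 512
```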
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bc94f565ef6c257d649a54d69efa6e13ff8c33bc5dcf5e2511f203d0999a9f4
+ size 5304
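
`training_args.bin` is a pickled `TrainingArguments` object rather than weights, so it can be inspected directly. A sketch; `weights_only=False` is required on recent torch releases, implies trusting the file's source, and needs `transformers` importable so the unpickle can resolve the class:

```python
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs)  # expect 2e-05 and 20, per the README
```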