haryoaw committed
Commit b4a47d5
1 Parent(s): 6137742

Initial Commit

Files changed (5)
  1. README.md +89 -0
  2. config.json +53 -0
  3. eval_result_ner.json +1 -0
  4. model.safetensors +3 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,89 @@
+ ---
+ library_name: transformers
+ license: mit
+ base_model: haryoaw/scenario-TCR-NER_data-univner_en
+ tags:
+ - generated_from_trainer
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: scenario-non-kd-pre-ner-full-mdeberta_data-univner_en66
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # scenario-non-kd-pre-ner-full-mdeberta_data-univner_en66
+
+ This model is a fine-tuned version of [haryoaw/scenario-TCR-NER_data-univner_en](https://huggingface.co/haryoaw/scenario-TCR-NER_data-univner_en) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1455
+ - Precision: 0.8256
+ - Recall: 0.8333
+ - F1: 0.8295
+ - Accuracy: 0.9850
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 3e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 66
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 30
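Mapped onto `transformers` `TrainingArguments`, the hyperparameters above correspond roughly to the sketch below; `output_dir` and anything not listed in the card (saving, logging, evaluation cadence) are illustrative assumptions, not recorded settings.

```python
from transformers import TrainingArguments

# Rough reconstruction of the listed hyperparameters; output_dir is an
# assumed placeholder and save/eval/logging options are left at defaults.
training_args = TrainingArguments(
    output_dir="scenario-non-kd-pre-ner-full-mdeberta_data-univner_en66",
    learning_rate=3e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=66,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=30,
)
```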
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
+ |:-------------:|:-------:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
+ | 0.0025 | 1.2755 | 500 | 0.1138 | 0.8395 | 0.8230 | 0.8312 | 0.9849 |
+ | 0.002 | 2.5510 | 1000 | 0.1150 | 0.8157 | 0.8292 | 0.8224 | 0.9849 |
+ | 0.0023 | 3.8265 | 1500 | 0.1137 | 0.8212 | 0.8416 | 0.8313 | 0.9852 |
+ | 0.0015 | 5.1020 | 2000 | 0.1171 | 0.8197 | 0.8520 | 0.8355 | 0.9851 |
+ | 0.001 | 6.3776 | 2500 | 0.1206 | 0.7990 | 0.8437 | 0.8207 | 0.9839 |
+ | 0.0013 | 7.6531 | 3000 | 0.1177 | 0.8233 | 0.8251 | 0.8242 | 0.9849 |
+ | 0.001 | 8.9286 | 3500 | 0.1177 | 0.8399 | 0.8199 | 0.8298 | 0.9855 |
+ | 0.0005 | 10.2041 | 4000 | 0.1209 | 0.8253 | 0.8313 | 0.8283 | 0.9851 |
+ | 0.0007 | 11.4796 | 4500 | 0.1285 | 0.8175 | 0.8116 | 0.8145 | 0.9843 |
+ | 0.0003 | 12.7551 | 5000 | 0.1335 | 0.8445 | 0.8095 | 0.8266 | 0.9850 |
+ | 0.0006 | 14.0306 | 5500 | 0.1371 | 0.7810 | 0.8344 | 0.8068 | 0.9829 |
+ | 0.0008 | 15.3061 | 6000 | 0.1322 | 0.8173 | 0.8427 | 0.8298 | 0.9844 |
+ | 0.0005 | 16.5816 | 6500 | 0.1271 | 0.8203 | 0.8364 | 0.8283 | 0.9850 |
+ | 0.0007 | 17.8571 | 7000 | 0.1200 | 0.8153 | 0.8499 | 0.8322 | 0.9854 |
+ | 0.0002 | 19.1327 | 7500 | 0.1321 | 0.8193 | 0.8354 | 0.8273 | 0.9846 |
+ | 0.0003 | 20.4082 | 8000 | 0.1355 | 0.8050 | 0.8375 | 0.8209 | 0.9846 |
+ | 0.0002 | 21.6837 | 8500 | 0.1413 | 0.8080 | 0.8364 | 0.8220 | 0.9841 |
+ | 0.0003 | 22.9592 | 9000 | 0.1327 | 0.8321 | 0.8416 | 0.8369 | 0.9855 |
+ | 0.0001 | 24.2347 | 9500 | 0.1412 | 0.8276 | 0.8251 | 0.8263 | 0.9847 |
+ | 0.0001 | 25.5102 | 10000 | 0.1427 | 0.8199 | 0.8344 | 0.8271 | 0.9849 |
+ | 0.0001 | 26.7857 | 10500 | 0.1429 | 0.8304 | 0.8261 | 0.8282 | 0.9852 |
+ | 0.0001 | 28.0612 | 11000 | 0.1446 | 0.8250 | 0.8344 | 0.8296 | 0.9852 |
+ | 0.0001 | 29.3367 | 11500 | 0.1455 | 0.8256 | 0.8333 | 0.8295 | 0.9850 |
+
+
+ ### Framework versions
+
+ - Transformers 4.44.2
+ - Pytorch 2.1.1+cu121
+ - Datasets 2.14.5
+ - Tokenizers 0.19.1
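A minimal inference sketch for the committed checkpoint, assuming it is published under the repo id `haryoaw/scenario-non-kd-pre-ner-full-mdeberta_data-univner_en66` (taken from the model-index name above); note that the config ships only generic `LABEL_0`–`LABEL_6` names, so the pipeline output uses those ids rather than readable NER tags.

```python
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline

# Assumed repo id; adjust if the checkpoint lives under a different path.
model_id = "haryoaw/scenario-non-kd-pre-ner-full-mdeberta_data-univner_en66"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForTokenClassification.from_pretrained(model_id)

# "simple" aggregation merges sub-word pieces into whole entity spans.
ner = pipeline("token-classification", model=model, tokenizer=tokenizer,
               aggregation_strategy="simple")
print(ner("Barack Obama visited Jakarta last week."))
```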
config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "_name_or_path": "haryoaw/scenario-TCR-NER_data-univner_en",
+   "architectures": [
+     "DebertaV2ForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4",
+     "5": "LABEL_5",
+     "6": "LABEL_6"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4,
+     "LABEL_5": 5,
+     "LABEL_6": 6
+   },
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "type_vocab_size": 0,
+   "vocab_size": 251000
+ }
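The config describes a 12-layer DeBERTa-v2 encoder with 768-dim hidden states, a 251k-token vocabulary, and a 7-class token-classification head. A small sketch for inspecting it programmatically, assuming the same repo id as in the model card section above:

```python
from transformers import AutoConfig

# Assumed repo id (see the model card section above).
config = AutoConfig.from_pretrained(
    "haryoaw/scenario-non-kd-pre-ner-full-mdeberta_data-univner_en66"
)

print(config.model_type)         # "deberta-v2"
print(config.num_hidden_layers)  # 12
print(config.hidden_size)        # 768
print(config.vocab_size)         # 251000
# The head predicts 7 classes; only generic names are stored in the config.
print(config.id2label)           # {0: "LABEL_0", ..., 6: "LABEL_6"}
```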
eval_result_ner.json ADDED
@@ -0,0 +1 @@
+ {"ceb_gja": {"precision": 0.7818181818181819, "recall": 0.8775510204081632, "f1": 0.8269230769230769, "accuracy": 0.9861003861003861}, "en_pud": {"precision": 0.8064211520302171, "recall": 0.7944186046511628, "f1": 0.8003748828491095, "accuracy": 0.9805440120891575}, "de_pud": {"precision": 0.7988338192419825, "recall": 0.7911453320500481, "f1": 0.7949709864603483, "accuracy": 0.9771693779007079}, "pt_pud": {"precision": 0.8453510436432637, "recall": 0.8107370336669699, "f1": 0.8276823037621922, "accuracy": 0.98167214935703}, "ru_pud": {"precision": 0.6912751677852349, "recall": 0.6959459459459459, "f1": 0.6936026936026937, "accuracy": 0.9666752777060191}, "sv_pud": {"precision": 0.8497512437810946, "recall": 0.8299319727891157, "f1": 0.8397246804326451, "accuracy": 0.9829104634095198}, "tl_trg": {"precision": 0.9130434782608695, "recall": 0.9130434782608695, "f1": 0.9130434782608695, "accuracy": 0.9945504087193461}, "tl_ugnayan": {"precision": 0.675, "recall": 0.8181818181818182, "f1": 0.7397260273972603, "accuracy": 0.9790337283500455}, "zh_gsd": {"precision": 0.6057347670250897, "recall": 0.6610169491525424, "f1": 0.6321695760598505, "accuracy": 0.9517149517149517}, "zh_gsdsimp": {"precision": 0.6147342995169082, "recall": 0.6671035386631717, "f1": 0.6398491514770585, "accuracy": 0.9512154512154513}, "hr_set": {"precision": 0.8172205438066465, "recall": 0.7712045616535994, "f1": 0.7935460212687936, "accuracy": 0.9731244847485573}, "da_ddt": {"precision": 0.7960199004975125, "recall": 0.7158836689038032, "f1": 0.7538280329799765, "accuracy": 0.9813429113040008}, "en_ewt": {"precision": 0.8409728718428437, "recall": 0.8262867647058824, "f1": 0.833565136764024, "accuracy": 0.9823484878670757}, "pt_bosque": {"precision": 0.8317843866171004, "recall": 0.7366255144032922, "f1": 0.7813182016586644, "accuracy": 0.9756919287059846}, "sr_set": {"precision": 0.8424317617866005, "recall": 0.8016528925619835, "f1": 0.821536600120992, "accuracy": 0.9718938796952982}, "sk_snk": {"precision": 0.7617924528301887, "recall": 0.7060109289617487, "f1": 0.7328417470221215, "accuracy": 0.9621545226130653}, "sv_talbanken": {"precision": 0.8564356435643564, "recall": 0.8826530612244898, "f1": 0.8693467336683417, "accuracy": 0.9973008784413799}}
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18ba9beded0a1ce9d4711da2b71551cc8a0aa0912210513c1eb0d55e72d966f0
+ size 1112921036
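What is committed here is a Git LFS pointer, not the weights themselves; the roughly 1.1 GB safetensors file is fetched when the repo is cloned with LFS enabled. A small sketch, assuming the real file is present locally, that checks it against the pointer's oid and size:

```python
import hashlib
from pathlib import Path

# Expected values, copied from the LFS pointer above.
EXPECTED_OID = "18ba9beded0a1ce9d4711da2b71551cc8a0aa0912210513c1eb0d55e72d966f0"
EXPECTED_SIZE = 1112921036

path = Path("model.safetensors")  # assumes git-lfs has replaced the pointer
digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")
```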
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e90903c94486eb940ceacd15f6c68e9e901be4577d270bf4a07f3f1bd631c27a
+ size 5304
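`training_args.bin` is the pickled `TrainingArguments` object that the Trainer saves alongside a checkpoint. A sketch, assuming the file has been fetched via LFS and a compatible `transformers` version is installed, that unpickles it to cross-check the hyperparameters listed in the model card:

```python
import torch

# Unpickling requires transformers to be importable, since the file stores
# a transformers.TrainingArguments instance.
args = torch.load("training_args.bin", map_location="cpu")

print(args.learning_rate)                # expected: 3e-05
print(args.per_device_train_batch_size)  # expected: 32
print(args.seed)                         # expected: 66
print(args.num_train_epochs)             # expected: 30
```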