damand2061 committed
Commit
2e40aec
1 Parent(s): 95da590

End of training

Files changed (7)
  1. README.md +61 -0
  2. config.json +104 -0
  3. special_tokens_map.json +7 -0
  4. tf_model.h5 +3 -0
  5. tokenizer.json +0 -0
  6. tokenizer_config.json +57 -0
  7. vocab.txt +0 -0
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: indolem/indobertweet-base-uncased
+ tags:
+ - generated_from_keras_callback
+ model-index:
+ - name: damand2061/pfsa-id-med-indobertweet
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information Keras had access to. You should
+ probably proofread and complete it, then remove this comment. -->
+
+ # damand2061/pfsa-id-med-indobertweet
+
+ This model is a fine-tuned version of [indolem/indobertweet-base-uncased](https://huggingface.co/indolem/indobertweet-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Train Loss: 0.1012
+ - Validation Loss: 0.2632
+ - Validation F1: 0.8507
+ - Validation Accuracy: 0.9272
+ - Epoch: 4
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 1e-05, 'decay_steps': 19220, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}
+ - training_precision: mixed_float16
+
+ ### Training results
+
+ | Train Loss | Validation Loss | Validation F1 | Validation Accuracy | Epoch |
+ |:----------:|:---------------:|:-------------:|:-------------------:|:-----:|
+ | 0.3339     | 0.2405          | 0.7705        | 0.9215              | 0     |
+ | 0.2057     | 0.2222          | 0.8239        | 0.9262              | 1     |
+ | 0.1616     | 0.2319          | 0.8432        | 0.9281              | 2     |
+ | 0.1264     | 0.2350          | 0.8509        | 0.9273              | 3     |
+ | 0.1012     | 0.2632          | 0.8507        | 0.9272              | 4     |
+
+
+ ### Framework versions
+
+ - Transformers 4.44.2
+ - TensorFlow 2.17.0
+ - Datasets 2.21.0
+ - Tokenizers 0.19.1
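
The hyperparameters block in the README above stores the optimizer as a serialized Keras dict. As a minimal sketch, assuming a standard TensorFlow/Keras setup (the surrounding training script is not part of this commit), the same optimizer and precision policy could be rebuilt like this:

```python
import tensorflow as tf
from transformers import AdamWeightDecay

# Linear (power=1.0) decay from 1e-05 to 0.0 over 19,220 steps, per the serialized config.
lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=1e-05,
    decay_steps=19220,
    end_learning_rate=0.0,
    power=1.0,
    cycle=False,
)

# AdamWeightDecay with the beta / epsilon / weight-decay values listed in the card.
optimizer = AdamWeightDecay(
    learning_rate=lr_schedule,
    weight_decay_rate=0.01,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-08,
)

# training_precision: mixed_float16
tf.keras.mixed_precision.set_global_policy("mixed_float16")
```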
config.json ADDED
@@ -0,0 +1,104 @@
+ {
+   "_name_or_path": "indolem/indobertweet-base-uncased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_ids": 0,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-EVENT",
+     "1": "I-EVENT",
+     "2": "L-EVENT",
+     "3": "B-STATEMENT",
+     "4": "I-STATEMENT",
+     "5": "L-STATEMENT",
+     "6": "B-DATETIME",
+     "7": "I-DATETIME",
+     "8": "L-DATETIME",
+     "9": "B-ROLE",
+     "10": "I-ROLE",
+     "11": "L-ROLE",
+     "12": "B-PERSON",
+     "13": "I-PERSON",
+     "14": "L-PERSON",
+     "15": "B-LOCATION",
+     "16": "I-LOCATION",
+     "17": "L-LOCATION",
+     "18": "B-ISSUE",
+     "19": "I-ISSUE",
+     "20": "L-ISSUE",
+     "21": "B-AFFILIATION",
+     "22": "I-AFFILIATION",
+     "23": "L-AFFILIATION",
+     "24": "B-PERSONCOREF",
+     "25": "I-PERSONCOREF",
+     "26": "L-PERSONCOREF",
+     "27": "U-EVENT",
+     "28": "U-AFFILIATION",
+     "29": "U-ROLE",
+     "30": "U-PERSON",
+     "31": "U-PERSONCOREF",
+     "32": "U-CUE",
+     "33": "U-CUECOREF",
+     "34": "U-LOCATION",
+     "35": "O"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-AFFILIATION": 21,
+     "B-DATETIME": 6,
+     "B-EVENT": 0,
+     "B-ISSUE": 18,
+     "B-LOCATION": 15,
+     "B-PERSON": 12,
+     "B-PERSONCOREF": 24,
+     "B-ROLE": 9,
+     "B-STATEMENT": 3,
+     "I-AFFILIATION": 22,
+     "I-DATETIME": 7,
+     "I-EVENT": 1,
+     "I-ISSUE": 19,
+     "I-LOCATION": 16,
+     "I-PERSON": 13,
+     "I-PERSONCOREF": 25,
+     "I-ROLE": 10,
+     "I-STATEMENT": 4,
+     "L-AFFILIATION": 23,
+     "L-DATETIME": 8,
+     "L-EVENT": 2,
+     "L-ISSUE": 20,
+     "L-LOCATION": 17,
+     "L-PERSON": 14,
+     "L-PERSONCOREF": 26,
+     "L-ROLE": 11,
+     "L-STATEMENT": 5,
+     "O": 35,
+     "U-AFFILIATION": 28,
+     "U-CUE": 32,
+     "U-CUECOREF": 33,
+     "U-EVENT": 27,
+     "U-LOCATION": 34,
+     "U-PERSON": 30,
+     "U-PERSONCOREF": 31,
+     "U-ROLE": 29
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.44.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 31923
+ }
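
config.json maps 36 label ids to a BILOU-style tag set (B-/I-/L-/U- prefixes plus O) over entity types such as PERSON, ROLE and AFFILIATION. A minimal inference sketch using that mapping and the TF checkpoint follows; the example sentence and variable names are illustrative assumptions, not part of the commit:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForTokenClassification

model_id = "damand2061/pfsa-id-med-indobertweet"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFAutoModelForTokenClassification.from_pretrained(model_id)

# Illustrative Indonesian sentence; replace with real input.
text = "Presiden Joko Widodo meninjau banjir di Jakarta pada Senin."
inputs = tokenizer(text, return_tensors="tf")

logits = model(**inputs).logits                 # shape: (1, seq_len, 36)
pred_ids = tf.argmax(logits, axis=-1)[0].numpy()

# Map each wordpiece to its predicted BILOU tag via config.json's id2label.
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].numpy().tolist())
for token, label_id in zip(tokens, pred_ids):
    print(token, model.config.id2label[int(label_id)])
```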
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:608594913cabbbabbd93484bbdae0c760db8dfffc122daad7f564a77601acbe9
+ size 440254552
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
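
Note that model_max_length in tokenizer_config.json is left at the library's "unset" sentinel (roughly 1e30), so the effective 512-position limit comes only from max_position_embeddings in config.json. A small sketch of capping inputs accordingly; the call pattern is an assumption, not part of the commit:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("damand2061/pfsa-id-med-indobertweet")

# Truncate explicitly to the 512 positions declared in config.json,
# since the tokenizer itself does not enforce a maximum length.
encoded = tokenizer(
    "Contoh kalimat yang sangat panjang ...",
    truncation=True,
    max_length=512,
)
print(len(encoded["input_ids"]))
```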
vocab.txt ADDED
The diff for this file is too large to render. See raw diff