hugo-albert committed on
Commit
c42e4c8
1 Parent(s): 653d369

Training in progress, epoch 1

config.json ADDED
@@ -0,0 +1,163 @@
+ {
+   "_name_or_path": "dccuchile/bert-base-spanish-wwm-cased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Fe",
+     "1": "vmg",
+     "2": "de",
+     "3": "W",
+     "4": "np",
+     "5": "vai",
+     "6": "Fh",
+     "7": "vsi",
+     "8": "vsg",
+     "9": "Fat",
+     "10": "Fs",
+     "11": "dt",
+     "12": "sp",
+     "13": "Fc",
+     "14": "pt",
+     "15": "pi",
+     "16": "rn",
+     "17": "vas",
+     "18": "Zm",
+     "19": "X",
+     "20": "vms",
+     "21": "rg",
+     "22": "vsn",
+     "23": "da",
+     "24": "vsm",
+     "25": "nc",
+     "26": "vss",
+     "27": "pe",
+     "28": "Fg",
+     "29": "Fx",
+     "30": "vmp",
+     "31": "px",
+     "32": "aq",
+     "33": "pn",
+     "34": "dn",
+     "35": "Fd",
+     "36": "ao",
+     "37": "Fp",
+     "38": "Zp",
+     "39": "vap",
+     "40": "Y",
+     "41": "I",
+     "42": "cs",
+     "43": "pr",
+     "44": "Z",
+     "45": "vmm",
+     "46": "vmi",
+     "47": "Fpt",
+     "48": "Fit",
+     "49": "van",
+     "50": "vag",
+     "51": "vmn",
+     "52": "p0",
+     "53": "Fia",
+     "54": "i",
+     "55": "Faa",
+     "56": "vam",
+     "57": "Fpa",
+     "58": "pp",
+     "59": "cc",
+     "60": "pd",
+     "61": "vsp",
+     "62": "dp",
+     "63": "Fz",
+     "64": "dd",
+     "65": "di"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Faa": 55,
+     "Fat": 9,
+     "Fc": 13,
+     "Fd": 35,
+     "Fe": 0,
+     "Fg": 28,
+     "Fh": 6,
+     "Fia": 53,
+     "Fit": 48,
+     "Fp": 37,
+     "Fpa": 57,
+     "Fpt": 47,
+     "Fs": 10,
+     "Fx": 29,
+     "Fz": 63,
+     "I": 41,
+     "W": 3,
+     "X": 19,
+     "Y": 40,
+     "Z": 44,
+     "Zm": 18,
+     "Zp": 38,
+     "ao": 36,
+     "aq": 32,
+     "cc": 59,
+     "cs": 42,
+     "da": 23,
+     "dd": 64,
+     "de": 2,
+     "di": 65,
+     "dn": 34,
+     "dp": 62,
+     "dt": 11,
+     "i": 54,
+     "nc": 25,
+     "np": 4,
+     "p0": 52,
+     "pd": 60,
+     "pe": 27,
+     "pi": 15,
+     "pn": 33,
+     "pp": 58,
+     "pr": 43,
+     "pt": 14,
+     "px": 31,
+     "rg": 21,
+     "rn": 16,
+     "sp": 12,
+     "vag": 50,
+     "vai": 5,
+     "vam": 56,
+     "van": 49,
+     "vap": 39,
+     "vas": 17,
+     "vmg": 1,
+     "vmi": 46,
+     "vmm": 45,
+     "vmn": 51,
+     "vmp": 30,
+     "vms": 20,
+     "vsg": 8,
+     "vsi": 7,
+     "vsm": 24,
+     "vsn": 22,
+     "vsp": 61,
+     "vss": 26
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 31002
+ }
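
The config above adapts BETO (`dccuchile/bert-base-spanish-wwm-cased`) for token classification over 66 fine-grained Spanish POS labels (EAGLES-style codes such as `nc` for a common noun or `vmi` for an indicative main verb). A minimal inference sketch, assuming the checkpoint is published on the Hub; the repo id below is hypothetical, so substitute this model's actual path:

```python
# Sketch: POS tagging with this checkpoint via the Transformers pipeline API.
from transformers import pipeline

model_id = "hugo-albert/bert-base-spanish-wwm-cased-finetuned-pos"  # hypothetical repo id

tagger = pipeline("token-classification", model=model_id)
for pred in tagger("El perro corre por el parque."):
    # Each prediction carries one of the 66 labels from id2label above.
    print(pred["word"], pred["entity"])
```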
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c154cf1ae05f04dcdf79c4db027e68d71e67f1b391ffb6f6f8ed96b3875acea3
+ size 437267512
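
`model.safetensors` is tracked with Git LFS, so the committed file is just this three-line pointer; the actual ~437 MB of weights live on the LFS server, addressed by the sha256 oid. A small verification sketch (not part of the repo) for checking a downloaded copy against the pointer:

```python
# Sketch: verify a downloaded model.safetensors against the LFS pointer's oid.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "c154cf1ae05f04dcdf79c4db027e68d71e67f1b391ffb6f6f8ed96b3875acea3"
assert sha256_of("model.safetensors") == expected, "oid mismatch: corrupt or wrong file"
```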
runs/Oct11_16-52-19_d75f4a12f671/events.out.tfevents.1728665709.d75f4a12f671.24794.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b27022d81c6ee916bf2d989c274c6e383b92852f649c4a3e64556f2917d2b5a
+ size 7635
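
The tfevents file holds the TensorBoard scalars logged during this epoch. A sketch of reading them back, assuming the `tensorboard` package is installed and the run directory has been downloaded locally; the tag names in the comment are illustrative, not taken from this file:

```python
# Sketch: list and dump the scalar metrics recorded in the tfevents file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Oct11_16-52-19_d75f4a12f671")
acc.Reload()
for tag in acc.Tags()["scalars"]:  # e.g. "train/loss", "train/learning_rate"
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)
```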
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
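
These five entries declare the standard BERT special tokens. A quick sanity check (a sketch, reusing the hypothetical repo id from above) that the loaded tokenizer exposes them:

```python
# Sketch: confirm the special tokens declared in special_tokens_map.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "hugo-albert/bert-base-spanish-wwm-cased-finetuned-pos"  # hypothetical repo id
)
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token)  # [CLS] [SEP] [PAD]
print(tokenizer.mask_token_id)  # 0, matching added_tokens_decoder in tokenizer_config.json
```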
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": false,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
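
Note that `do_lower_case` and `strip_accents` are both false, so the tokenizer preserves BETO's cased, accent-sensitive vocabulary, with inputs capped at a `model_max_length` of 512. A short sketch of the effect (same hypothetical repo id):

```python
# Sketch: casing and accents survive tokenization with these settings.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "hugo-albert/bert-base-spanish-wwm-cased-finetuned-pos"  # hypothetical repo id
)
enc = tokenizer("Él corrió rápido.")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# Expect accented, case-preserving wordpieces wrapped in [CLS] ... [SEP].
```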
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0726a53cbe4f1e3a5439ba6347813fbc3e534ecc5947c07667f4636ff8178f3d
+ size 5176
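
`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` saves alongside each checkpoint. A sketch of inspecting it locally; unpickling requires a transformers install compatible with the 4.44.2 used here, and the fields printed are examples rather than values known from this commit:

```python
# Sketch: inspect the hyperparameters stored in training_args.bin.
import torch

args = torch.load("training_args.bin", weights_only=False)  # pickled TrainingArguments
print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)
```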
vocab.txt ADDED
The diff for this file is too large to render. See raw diff