KoichiYasuoka committed on
Commit 5b12b0e
1 Parent(s): a56c659

initial release

Files changed (9)
  1. README.md +76 -0
  2. config.json +436 -0
  3. maker.py +47 -0
  4. pytorch_model.bin +3 -0
  5. sentencepiece.model +3 -0
  6. special_tokens_map.json +15 -0
  7. tokenizer.json +0 -0
  8. tokenizer_config.json +21 -0
  9. ud.py +61 -0
README.md ADDED
@@ -0,0 +1,76 @@
+ ---
+ language:
+ - "th"
+ tags:
+ - "thai"
+ - "token-classification"
+ - "pos"
+ - "dependency-parsing"
+ datasets:
+ - "universal_dependencies"
+ license: "apache-2.0"
+ pipeline_tag: "token-classification"
+ widget:
+ - text: "หลายหัวดีกว่าหัวเดียว"
+ ---
+
+ # roberta-base-thai-spm-ud-goeswith
+
+ ## Model Description
+
+ This is a RoBERTa model pre-trained on Thai Wikipedia texts and fine-tuned for POS-tagging and dependency-parsing (using the `goeswith` relation to attach non-first subword pieces), derived from [roberta-base-thai-spm](https://huggingface.co/KoichiYasuoka/roberta-base-thai-spm).
+
+ ## How to Use
+
+ ```py
+ class UDgoeswith(object):
+   def __init__(self,bert):
+     from transformers import AutoTokenizer,AutoModelForTokenClassification
+     self.tokenizer=AutoTokenizer.from_pretrained(bert)
+     self.model=AutoModelForTokenClassification.from_pretrained(bert)
+   def __call__(self,text):
+     import numpy,torch,ufal.chu_liu_edmonds
+     w=self.tokenizer(text,return_offsets_mapping=True)
+     v=w["input_ids"]
+     x=[v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor(x)).logits.numpy()[:,1:-2,:]
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     m=numpy.full((e.shape[0]+1,e.shape[1]+1),numpy.nan)
+     m[1:,1:]=numpy.nanmax(e,axis=2).transpose()
+     p=numpy.zeros(m.shape)
+     p[1:,1:]=numpy.nanargmax(e,axis=2).transpose()
+     for i in range(1,m.shape[0]):
+       m[i,0],m[i,i],p[i,0]=m[i,i],numpy.nan,p[i,i]
+     h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     if [0 for i in h if i==0]!=[0]:
+       m[:,0]+=numpy.where(m[:,0]==numpy.nanmax(m[[i for i,j in enumerate(h) if j==0],0]),0,numpy.nan)
+       m[[i for i,j in enumerate(h) if j==0]]+=[0 if i==0 or j==0 else numpy.nan for i,j in enumerate(h)]
+       h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     u="# text = "+text+"\n"
+     v=[(s,e) for s,e in w["offset_mapping"] if s<e]
+     for i,(s,e) in enumerate(v,1):
+       q=self.model.config.id2label[p[i,h[i]]].split("|")
+       u+="\t".join([str(i),text[s:e],"_",q[0],"_","|".join(q[1:-1]),str(h[i]),q[-1],"_","_" if i<len(v) and e<v[i][0] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+
+ nlp=UDgoeswith("KoichiYasuoka/roberta-base-thai-spm-ud-goeswith")
+ print(nlp("หลายหัวดีกว่าหัวเดียว"))
+ ```
+
+ The example above requires [ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/).
+ Alternatively, the custom pipeline in this repository works without ufal.chu-liu-edmonds:
+
+ ```py
+ from transformers import pipeline
+ nlp=pipeline("universal-dependencies","KoichiYasuoka/roberta-base-thai-spm-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple")
+ print(nlp("หลายหัวดีกว่าหัวเดียว"))
+ ```
+
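The output is a plain CoNLL-U string. A minimal sketch of reading it back (assuming `nlp` is the `UDgoeswith` object built in the first snippet); every non-comment line carries the ten standard CoNLL-U columns:

```py
doc=nlp("หลายหัวดีกว่าหัวเดียว")
for line in doc.splitlines():
  if line and not line.startswith("#"):
    idx,form,lemma,upos,xpos,feats,head,deprel,deps,misc=line.split("\t")
    print(form,upos,head,deprel)
```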
config.json ADDED
@@ -0,0 +1,436 @@
+ {
+   "architectures": [
+     "RobertaForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "custom_pipelines": {
+     "universal-dependencies": {
+       "impl": "ud.UniversalDependenciesPipeline"
+     }
+   },
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "-|_|dep",
+     "1": "ADP|_|acl",
+     "2": "ADP|_|advcl",
+     "3": "ADP|_|advmod",
+     "4": "ADP|_|appos",
+     "5": "ADP|_|case",
+     "6": "ADP|_|cc",
+     "7": "ADP|_|cc:preconj",
+     "8": "ADP|_|csubj",
+     "9": "ADP|_|fixed",
+     "10": "ADP|_|mark",
+     "11": "ADP|_|obl",
+     "12": "ADP|_|root",
+     "13": "ADV|_|advcl",
+     "14": "ADV|_|advmod",
+     "15": "ADV|_|aux",
+     "16": "ADV|_|cc",
+     "17": "ADV|_|ccomp",
+     "18": "ADV|_|conj",
+     "19": "ADV|_|fixed",
+     "20": "ADV|_|mark",
+     "21": "ADV|_|obj",
+     "22": "ADV|_|root",
+     "23": "ADV|_|xcomp",
+     "24": "AUX|_|advmod",
+     "25": "AUX|_|aux",
+     "26": "AUX|_|aux:pass",
+     "27": "AUX|_|ccomp",
+     "28": "AUX|_|conj",
+     "29": "AUX|_|cop",
+     "30": "AUX|_|mark",
+     "31": "CCONJ|_|advmod",
+     "32": "CCONJ|_|case",
+     "33": "CCONJ|_|cc",
+     "34": "CCONJ|_|compound",
+     "35": "CCONJ|_|conj",
+     "36": "CCONJ|_|fixed",
+     "37": "CCONJ|_|mark",
+     "38": "CCONJ|_|nsubj",
+     "39": "CCONJ|_|obl",
+     "40": "CCONJ|_|root",
+     "41": "DET|_|advmod",
+     "42": "DET|_|case",
+     "43": "DET|_|cc:preconj",
+     "44": "DET|_|conj",
+     "45": "DET|_|det",
+     "46": "DET|_|det:predet",
+     "47": "DET|_|fixed",
+     "48": "DET|_|mark",
+     "49": "DET|_|nsubj",
+     "50": "DET|_|nsubj:pass",
+     "51": "DET|_|obj",
+     "52": "DET|_|obl",
+     "53": "DET|_|obl:tmod",
+     "54": "DET|_|root",
+     "55": "INTJ|_|acl",
+     "56": "INTJ|_|nsubj",
+     "57": "INTJ|_|root",
+     "58": "NOUN|_|acl",
+     "59": "NOUN|_|acl:relcl",
+     "60": "NOUN|_|advcl",
+     "61": "NOUN|_|advmod",
+     "62": "NOUN|_|appos",
+     "63": "NOUN|_|aux",
+     "64": "NOUN|_|case",
+     "65": "NOUN|_|cc",
+     "66": "NOUN|_|ccomp",
+     "67": "NOUN|_|clf",
+     "68": "NOUN|_|compound",
+     "69": "NOUN|_|conj",
+     "70": "NOUN|_|dislocated",
+     "71": "NOUN|_|fixed",
+     "72": "NOUN|_|flat:name",
+     "73": "NOUN|_|iobj",
+     "74": "NOUN|_|mark",
+     "75": "NOUN|_|nmod",
+     "76": "NOUN|_|nmod:poss",
+     "77": "NOUN|_|nsubj",
+     "78": "NOUN|_|nsubj:pass",
+     "79": "NOUN|_|obj",
+     "80": "NOUN|_|obl",
+     "81": "NOUN|_|obl:poss",
+     "82": "NOUN|_|obl:tmod",
+     "83": "NOUN|_|parataxis",
+     "84": "NOUN|_|root",
+     "85": "NOUN|_|vocative",
+     "86": "NOUN|_|xcomp",
+     "87": "NUM|_|acl",
+     "88": "NUM|_|acl:relcl",
+     "89": "NUM|_|advmod",
+     "90": "NUM|_|appos",
+     "91": "NUM|_|ccomp",
+     "92": "NUM|_|clf",
+     "93": "NUM|_|conj",
+     "94": "NUM|_|flat:name",
+     "95": "NUM|_|nmod",
+     "96": "NUM|_|nsubj",
+     "97": "NUM|_|nummod",
+     "98": "NUM|_|obj",
+     "99": "NUM|_|obl",
+     "100": "NUM|_|obl:poss",
+     "101": "NUM|_|obl:tmod",
+     "102": "NUM|_|root",
+     "103": "NUM|_|xcomp",
+     "104": "PART|_|acl",
+     "105": "PART|_|advmod",
+     "106": "PART|_|aux",
+     "107": "PART|_|cc",
+     "108": "PART|_|cc:preconj",
+     "109": "PART|_|ccomp",
+     "110": "PART|_|clf",
+     "111": "PART|_|compound",
+     "112": "PART|_|compound:prt",
+     "113": "PART|_|conj",
+     "114": "PART|_|discourse",
+     "115": "PART|_|fixed",
+     "116": "PART|_|mark",
+     "117": "PART|_|nmod",
+     "118": "PART|_|nmod:poss",
+     "119": "PART|_|nsubj",
+     "120": "PART|_|obj",
+     "121": "PART|_|obl",
+     "122": "PART|_|root",
+     "123": "PART|_|xcomp",
+     "124": "PRON|_|acl",
+     "125": "PRON|_|acl:relcl",
+     "126": "PRON|_|advcl",
+     "127": "PRON|_|advmod",
+     "128": "PRON|_|appos",
+     "129": "PRON|_|ccomp",
+     "130": "PRON|_|compound",
+     "131": "PRON|_|conj",
+     "132": "PRON|_|fixed",
+     "133": "PRON|_|nmod",
+     "134": "PRON|_|nmod:poss",
+     "135": "PRON|_|nsubj",
+     "136": "PRON|_|nsubj:pass",
+     "137": "PRON|_|obj",
+     "138": "PRON|_|obl",
+     "139": "PRON|_|obl:poss",
+     "140": "PRON|_|reparandum",
+     "141": "PRON|_|root",
+     "142": "PRON|_|xcomp",
+     "143": "PROPN|_|acl",
+     "144": "PROPN|_|acl:relcl",
+     "145": "PROPN|_|advmod",
+     "146": "PROPN|_|appos",
+     "147": "PROPN|_|aux",
+     "148": "PROPN|_|cc",
+     "149": "PROPN|_|ccomp",
+     "150": "PROPN|_|clf",
+     "151": "PROPN|_|compound",
+     "152": "PROPN|_|conj",
+     "153": "PROPN|_|flat:name",
+     "154": "PROPN|_|goeswith",
+     "155": "PROPN|_|nmod",
+     "156": "PROPN|_|nmod:poss",
+     "157": "PROPN|_|nsubj",
+     "158": "PROPN|_|nsubj:pass",
+     "159": "PROPN|_|obj",
+     "160": "PROPN|_|obl",
+     "161": "PROPN|_|obl:poss",
+     "162": "PROPN|_|obl:tmod",
+     "163": "PROPN|_|root",
+     "164": "PROPN|_|xcomp",
+     "165": "PUNCT|_|advmod",
+     "166": "PUNCT|_|clf",
+     "167": "PUNCT|_|punct",
+     "168": "PUNCT|_|root",
+     "169": "SCONJ|_|mark",
+     "170": "SYM|_|advmod",
+     "171": "SYM|_|clf",
+     "172": "SYM|_|nsubj",
+     "173": "SYM|_|obj",
+     "174": "SYM|_|obl",
+     "175": "VERB|_|acl",
+     "176": "VERB|_|acl:relcl",
+     "177": "VERB|_|advcl",
+     "178": "VERB|_|advmod",
+     "179": "VERB|_|appos",
+     "180": "VERB|_|aux",
+     "181": "VERB|_|aux:pass",
+     "182": "VERB|_|case",
+     "183": "VERB|_|cc",
+     "184": "VERB|_|ccomp",
+     "185": "VERB|_|compound",
+     "186": "VERB|_|conj",
+     "187": "VERB|_|csubj",
+     "188": "VERB|_|fixed",
+     "189": "VERB|_|mark",
+     "190": "VERB|_|nmod",
+     "191": "VERB|_|nmod:poss",
+     "192": "VERB|_|nsubj",
+     "193": "VERB|_|obj",
+     "194": "VERB|_|obl",
+     "195": "VERB|_|obl:poss",
+     "196": "VERB|_|parataxis",
+     "197": "VERB|_|root",
+     "198": "VERB|_|xcomp",
+     "199": "X|_|goeswith"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "-|_|dep": 0,
+     "ADP|_|acl": 1,
+     "ADP|_|advcl": 2,
+     "ADP|_|advmod": 3,
+     "ADP|_|appos": 4,
+     "ADP|_|case": 5,
+     "ADP|_|cc": 6,
+     "ADP|_|cc:preconj": 7,
+     "ADP|_|csubj": 8,
+     "ADP|_|fixed": 9,
+     "ADP|_|mark": 10,
+     "ADP|_|obl": 11,
+     "ADP|_|root": 12,
+     "ADV|_|advcl": 13,
+     "ADV|_|advmod": 14,
+     "ADV|_|aux": 15,
+     "ADV|_|cc": 16,
+     "ADV|_|ccomp": 17,
+     "ADV|_|conj": 18,
+     "ADV|_|fixed": 19,
+     "ADV|_|mark": 20,
+     "ADV|_|obj": 21,
+     "ADV|_|root": 22,
+     "ADV|_|xcomp": 23,
+     "AUX|_|advmod": 24,
+     "AUX|_|aux": 25,
+     "AUX|_|aux:pass": 26,
+     "AUX|_|ccomp": 27,
+     "AUX|_|conj": 28,
+     "AUX|_|cop": 29,
+     "AUX|_|mark": 30,
+     "CCONJ|_|advmod": 31,
+     "CCONJ|_|case": 32,
+     "CCONJ|_|cc": 33,
+     "CCONJ|_|compound": 34,
+     "CCONJ|_|conj": 35,
+     "CCONJ|_|fixed": 36,
+     "CCONJ|_|mark": 37,
+     "CCONJ|_|nsubj": 38,
+     "CCONJ|_|obl": 39,
+     "CCONJ|_|root": 40,
+     "DET|_|advmod": 41,
+     "DET|_|case": 42,
+     "DET|_|cc:preconj": 43,
+     "DET|_|conj": 44,
+     "DET|_|det": 45,
+     "DET|_|det:predet": 46,
+     "DET|_|fixed": 47,
+     "DET|_|mark": 48,
+     "DET|_|nsubj": 49,
+     "DET|_|nsubj:pass": 50,
+     "DET|_|obj": 51,
+     "DET|_|obl": 52,
+     "DET|_|obl:tmod": 53,
+     "DET|_|root": 54,
+     "INTJ|_|acl": 55,
+     "INTJ|_|nsubj": 56,
+     "INTJ|_|root": 57,
+     "NOUN|_|acl": 58,
+     "NOUN|_|acl:relcl": 59,
+     "NOUN|_|advcl": 60,
+     "NOUN|_|advmod": 61,
+     "NOUN|_|appos": 62,
+     "NOUN|_|aux": 63,
+     "NOUN|_|case": 64,
+     "NOUN|_|cc": 65,
+     "NOUN|_|ccomp": 66,
+     "NOUN|_|clf": 67,
+     "NOUN|_|compound": 68,
+     "NOUN|_|conj": 69,
+     "NOUN|_|dislocated": 70,
+     "NOUN|_|fixed": 71,
+     "NOUN|_|flat:name": 72,
+     "NOUN|_|iobj": 73,
+     "NOUN|_|mark": 74,
+     "NOUN|_|nmod": 75,
+     "NOUN|_|nmod:poss": 76,
+     "NOUN|_|nsubj": 77,
+     "NOUN|_|nsubj:pass": 78,
+     "NOUN|_|obj": 79,
+     "NOUN|_|obl": 80,
+     "NOUN|_|obl:poss": 81,
+     "NOUN|_|obl:tmod": 82,
+     "NOUN|_|parataxis": 83,
+     "NOUN|_|root": 84,
+     "NOUN|_|vocative": 85,
+     "NOUN|_|xcomp": 86,
+     "NUM|_|acl": 87,
+     "NUM|_|acl:relcl": 88,
+     "NUM|_|advmod": 89,
+     "NUM|_|appos": 90,
+     "NUM|_|ccomp": 91,
+     "NUM|_|clf": 92,
+     "NUM|_|conj": 93,
+     "NUM|_|flat:name": 94,
+     "NUM|_|nmod": 95,
+     "NUM|_|nsubj": 96,
+     "NUM|_|nummod": 97,
+     "NUM|_|obj": 98,
+     "NUM|_|obl": 99,
+     "NUM|_|obl:poss": 100,
+     "NUM|_|obl:tmod": 101,
+     "NUM|_|root": 102,
+     "NUM|_|xcomp": 103,
+     "PART|_|acl": 104,
+     "PART|_|advmod": 105,
+     "PART|_|aux": 106,
+     "PART|_|cc": 107,
+     "PART|_|cc:preconj": 108,
+     "PART|_|ccomp": 109,
+     "PART|_|clf": 110,
+     "PART|_|compound": 111,
+     "PART|_|compound:prt": 112,
+     "PART|_|conj": 113,
+     "PART|_|discourse": 114,
+     "PART|_|fixed": 115,
+     "PART|_|mark": 116,
+     "PART|_|nmod": 117,
+     "PART|_|nmod:poss": 118,
+     "PART|_|nsubj": 119,
+     "PART|_|obj": 120,
+     "PART|_|obl": 121,
+     "PART|_|root": 122,
+     "PART|_|xcomp": 123,
+     "PRON|_|acl": 124,
+     "PRON|_|acl:relcl": 125,
+     "PRON|_|advcl": 126,
+     "PRON|_|advmod": 127,
+     "PRON|_|appos": 128,
+     "PRON|_|ccomp": 129,
+     "PRON|_|compound": 130,
+     "PRON|_|conj": 131,
+     "PRON|_|fixed": 132,
+     "PRON|_|nmod": 133,
+     "PRON|_|nmod:poss": 134,
+     "PRON|_|nsubj": 135,
+     "PRON|_|nsubj:pass": 136,
+     "PRON|_|obj": 137,
+     "PRON|_|obl": 138,
+     "PRON|_|obl:poss": 139,
+     "PRON|_|reparandum": 140,
+     "PRON|_|root": 141,
+     "PRON|_|xcomp": 142,
+     "PROPN|_|acl": 143,
+     "PROPN|_|acl:relcl": 144,
+     "PROPN|_|advmod": 145,
+     "PROPN|_|appos": 146,
+     "PROPN|_|aux": 147,
+     "PROPN|_|cc": 148,
+     "PROPN|_|ccomp": 149,
+     "PROPN|_|clf": 150,
+     "PROPN|_|compound": 151,
+     "PROPN|_|conj": 152,
+     "PROPN|_|flat:name": 153,
+     "PROPN|_|goeswith": 154,
+     "PROPN|_|nmod": 155,
+     "PROPN|_|nmod:poss": 156,
+     "PROPN|_|nsubj": 157,
+     "PROPN|_|nsubj:pass": 158,
+     "PROPN|_|obj": 159,
+     "PROPN|_|obl": 160,
+     "PROPN|_|obl:poss": 161,
+     "PROPN|_|obl:tmod": 162,
+     "PROPN|_|root": 163,
+     "PROPN|_|xcomp": 164,
+     "PUNCT|_|advmod": 165,
+     "PUNCT|_|clf": 166,
+     "PUNCT|_|punct": 167,
+     "PUNCT|_|root": 168,
+     "SCONJ|_|mark": 169,
+     "SYM|_|advmod": 170,
+     "SYM|_|clf": 171,
+     "SYM|_|nsubj": 172,
+     "SYM|_|obj": 173,
+     "SYM|_|obl": 174,
+     "VERB|_|acl": 175,
+     "VERB|_|acl:relcl": 176,
+     "VERB|_|advcl": 177,
+     "VERB|_|advmod": 178,
+     "VERB|_|appos": 179,
+     "VERB|_|aux": 180,
+     "VERB|_|aux:pass": 181,
+     "VERB|_|case": 182,
+     "VERB|_|cc": 183,
+     "VERB|_|ccomp": 184,
+     "VERB|_|compound": 185,
+     "VERB|_|conj": 186,
+     "VERB|_|csubj": 187,
+     "VERB|_|fixed": 188,
+     "VERB|_|mark": 189,
+     "VERB|_|nmod": 190,
+     "VERB|_|nmod:poss": 191,
+     "VERB|_|nsubj": 192,
+     "VERB|_|obj": 193,
+     "VERB|_|obl": 194,
+     "VERB|_|obl:poss": 195,
+     "VERB|_|parataxis": 196,
+     "VERB|_|root": 197,
+     "VERB|_|xcomp": 198,
+     "X|_|goeswith": 199
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "tokenizer_class": "RemBertTokenizerFast",
+   "torch_dtype": "float32",
+   "transformers_version": "4.22.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 3005
+ }
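Each entry of `id2label`/`label2id` above packs a UPOS tag, a FEATS field (always `_` in this model) and a dependency relation into one string joined by `|`; `ud.py` and the README example split it back apart. A minimal illustration:

```py
q="NOUN|_|nsubj".split("|")            # any value from id2label above
upos,feats,deprel=q[0],"|".join(q[1:-1]),q[-1]
print(upos,feats,deprel)               # NOUN _ nsubj
```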
maker.py ADDED
@@ -0,0 +1,47 @@
+ #! /usr/bin/python3
+ src="KoichiYasuoka/roberta-base-thai-spm"
+ tgt="KoichiYasuoka/roberta-base-thai-spm-ud-goeswith"
+ url="https://github.com/KoichiYasuoka/spaCy-Thai"
+ import os
+ d=os.path.join(os.path.basename(url),"UD_Thai-Corpora")
+ os.system("test -d {} || git clone --depth=1 {}".format(d,url))
+ s='{if(NF>0)u=u$0"\\n";else{if(u~/\\t0\\troot\\t/)print u>"train.conllu";u=""}}'
+ os.system("nawk -F'\\t' '{}' {}/*-ud-*.conllu".format(s,d))
+ class UDgoeswithDataset(object):
+   def __init__(self,conllu,tokenizer):
+     self.ids,self.tags,label=[],[],set()
+     with open(conllu,"r",encoding="utf-8") as r:
+       cls,sep,msk=tokenizer.cls_token_id,tokenizer.sep_token_id,tokenizer.mask_token_id
+       dep,c="-|_|dep",[]
+       for s in r:
+         t=s.split("\t")
+         if len(t)==10 and t[0].isdecimal():
+           c.append(t)
+         elif c!=[]:
+           v=tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
+           for i in range(len(v)-1,-1,-1):
+             for j in range(1,len(v[i])):
+               c.insert(i+1,[c[i][0],"_","_","X","_","_",c[i][0],"goeswith","_","_"])
+           y=["0"]+[t[0] for t in c]
+           h=[i if t[6]=="0" else y.index(t[6]) for i,t in enumerate(c,1)]
+           p,v=[t[3]+"|_|"+t[7] for t in c],sum(v,[])
+           self.ids.append([cls]+v+[sep])
+           self.tags.append([dep]+p+[dep])
+           label=set(sum([self.tags[-1],list(label)],[]))
+           for i,k in enumerate(v):
+             self.ids.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
+             self.tags.append([dep]+[t if h[j]==i+1 else dep for j,t in enumerate(p)]+[dep,dep])
+           c=[]
+     self.label2id={l:i for i,l in enumerate(sorted(label))}
+   __len__=lambda self:len(self.ids)
+   __getitem__=lambda self,i:{"input_ids":self.ids[i],"labels":[self.label2id[t] for t in self.tags[i]]}
+ from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
+ tkz=AutoTokenizer.from_pretrained(src)
+ trainDS=UDgoeswithDataset("train.conllu",tkz)
+ lid=trainDS.label2id
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()})
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=32,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1)
+ trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg),train_dataset=trainDS)
+ trn.train()
+ trn.save_model(tgt)
+ tkz.save_pretrained(tgt)
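A minimal sketch (with made-up token ids and a made-up two-word tree, not real vocabulary entries) of the rows `UDgoeswithDataset` derives from a single sentence: one row labels every word with its own `UPOS|FEATS|DEPREL`, and then, for each word, one extra row masks that word, re-appends its id after `[SEP]`, and keeps real labels only at the positions whose head is the masked word. A word that the tokenizer splits into several pieces additionally receives one `X|_|goeswith` entry per extra piece, headed by the word itself, before this expansion:

```py
cls,msk,sep=0,3,2                      # placeholder special-token ids
v=[11,12]                              # word 1 is nsubj of word 2, word 2 is the root
rows=[
  # full sentence: each position labelled with its own UPOS|FEATS|DEPREL
  ([cls,11,12,sep],["-|_|dep","NOUN|_|nsubj","VERB|_|root","-|_|dep"]),
  # word 1 masked and re-appended: nothing is headed by word 1, so every label is "-|_|dep"
  ([cls,msk,12,sep,11],["-|_|dep","-|_|dep","-|_|dep","-|_|dep","-|_|dep"]),
  # word 2 masked and re-appended: word 1 (headed by word 2) and word 2 itself (root) keep labels
  ([cls,11,msk,sep,12],["-|_|dep","NOUN|_|nsubj","VERB|_|root","-|_|dep","-|_|dep"]),
]
```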
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:990574f354bc5c07915f67c322132900a90fa675991ebc32d1de2c39f03a34a8
+ size 351720561
sentencepiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
+ size 1
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "keep_accents": true,
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "remove_space": true,
+   "sep_token": "[SEP]",
+   "tokenizer_class": "RemBertTokenizerFast",
+   "unk_token": "[UNK]"
+ }
ud.py ADDED
@@ -0,0 +1,61 @@
+ from transformers import TokenClassificationPipeline
+
+ class UniversalDependenciesPipeline(TokenClassificationPipeline):
+   def _forward(self,model_input):
+     import torch
+     v=model_input["input_ids"][0].tolist()
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]))
+     return {"logits":e.logits[:,1:-2,:],**model_input}
+   def postprocess(self,model_output,**kwargs):
+     import numpy
+     e=model_output["logits"].numpy()
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     m,p=numpy.nanmax(e,axis=2),numpy.nanargmax(e,axis=2)
+     h=self.chu_liu_edmonds(m)
+     z=[i for i,j in enumerate(h) if i==j]
+     if len(z)>1:
+       k,h=z[numpy.nanargmax(m[z,z])],numpy.nanmin(m)-numpy.nanmax(m)
+       m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+       h=self.chu_liu_edmonds(m)
+     v=[(s,e) for s,e in model_output["offset_mapping"][0].tolist() if s<e]
+     q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
+     g="aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none"
+     if g:
+       for i,j in reversed(list(enumerate(q[1:],1))):
+         if j[-1]=="goeswith" and set([t[-1] for t in q[h[i]+1:i+1]])=={"goeswith"}:
+           h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+           v[i-1]=(v[i-1][0],v.pop(i)[1])
+           q.pop(i)
+     t=model_output["sentence"].replace("\n"," ")
+     u="# text = "+t+"\n"
+     for i,(s,e) in enumerate(v):
+       u+="\t".join([str(i+1),t[s:e],t[s:e] if g else "_",q[i][0],"_","|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),q[i][-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+   def chu_liu_edmonds(self,matrix):
+     import numpy
+     h=numpy.nanargmax(matrix,axis=0)
+     x=[-1 if i==j else j for i,j in enumerate(h)]
+     for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+       y=[]
+       while x!=y:
+         y=list(x)
+         for i,j in enumerate(x):
+           x[i]=b(x,i,j)
+       if max(x)<0:
+         return h
+     y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+     z=matrix-numpy.nanmax(matrix,axis=0)
+     m=numpy.block([[z[x,:][:,x],numpy.nanmax(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.nanmax(z[y,:][:,x],axis=0),numpy.nanmax(z[y,y])]])
+     k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.nanargmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+     h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+     i=y[numpy.nanargmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+     h[i]=x[k[-1]] if k[-1]<len(x) else i
+     return h
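`chu_liu_edmonds()` uses `self` only for its recursive call, so it can be exercised on its own. A minimal sketch (hypothetical scores; it assumes `ud.py` from this repository is importable from the working directory) of the matrix convention it consumes: `matrix[h,d]` scores token `h+1` as the head of token `d+1`, and a diagonal entry scores that token as the root:

```py
import numpy
from ud import UniversalDependenciesPipeline

class Solver(UniversalDependenciesPipeline):
  def __init__(self):                  # skip the heavy pipeline constructor
    pass

scores=numpy.array([[9.,7.,6.],        # token 1 as root / head of token 2 / head of token 3
                    [2.,1.,8.],        # token 2 as head of token 1 / root / head of token 3
                    [3.,4.,2.]])       # token 3 as head of token 1 / head of token 2 / root
print(Solver().chu_liu_edmonds(scores))  # [0 0 1]: token 1 is the root and heads token 2, token 2 heads token 3
```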