KoichiYasuoka committed on
Commit a7caa81
1 parent: efc5536

initial release

Files changed (9)
  1. README.md +75 -0
  2. config.json +279 -0
  3. maker.py +50 -0
  4. pytorch_model.bin +3 -0
  5. sentencepiece.model +3 -0
  6. special_tokens_map.json +15 -0
  7. tokenizer.json +0 -0
  8. tokenizer_config.json +22 -0
  9. ud.py +72 -0
README.md ADDED
@@ -0,0 +1,75 @@
+ ---
+ language:
+ - "ain"
+ tags:
+ - "ainu"
+ - "token-classification"
+ - "pos"
+ - "dependency-parsing"
+ license: "cc-by-sa-4.0"
+ pipeline_tag: "token-classification"
+ widget:
+ - text: "itak=as awa pon rupne aynu ene itaki"
+ - text: "イタカㇱ アワ ポン ルㇷ゚ネ アイヌ エネ イタキ"
+ ---
+
+ # roberta-base-ainu-ud-goeswith
+
+ ## Model Description
+
+ This is a RoBERTa model pre-trained on Ainu texts (both katakana and romanized) for POS-tagging and dependency-parsing (using `goeswith` for subwords), derived from [roberta-base-ainu-upos](https://huggingface.co/KoichiYasuoka/roberta-base-ainu-upos).
+
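Each token-classification label in this model packs a UPOS tag, a language-specific POS tag, and a dependency relation into one string, with non-initial subwords tagged `X|_|goeswith` so they can be re-joined at parse time. A minimal sketch for inspecting that label inventory from the released `config.json` (assuming only `transformers` is installed and the model is reachable on the Hub):

```py
from transformers import AutoConfig

# The config added in this commit carries 121 labels of the form UPOS|XPOS|deprel.
cfg = AutoConfig.from_pretrained("KoichiYasuoka/roberta-base-ainu-ud-goeswith")
print(len(cfg.id2label))   # 121
print(cfg.id2label[1])     # 'ADP|副助詞|case'
print(cfg.id2label[120])   # 'X|_|goeswith', the subword-joining label
```
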
+ ## How to Use
+
+ ```py
+ class UDgoeswith(object):
+   def __init__(self,bert):
+     from transformers import AutoTokenizer,AutoModelForTokenClassification
+     self.tokenizer=AutoTokenizer.from_pretrained(bert)
+     self.model=AutoModelForTokenClassification.from_pretrained(bert)
+   def __call__(self,text):
+     import numpy,torch,ufal.chu_liu_edmonds
+     w=self.tokenizer(text,return_offsets_mapping=True)
+     v=w["input_ids"]
+     x=[v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor(x)).logits.numpy()[:,1:-2,:]
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     m=numpy.full((e.shape[0]+1,e.shape[1]+1),numpy.nan)
+     m[1:,1:]=numpy.nanmax(e,axis=2).transpose()
+     p=numpy.zeros(m.shape)
+     p[1:,1:]=numpy.nanargmax(e,axis=2).transpose()
+     for i in range(1,m.shape[0]):
+       m[i,0],m[i,i],p[i,0]=m[i,i],numpy.nan,p[i,i]
+     h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     if [0 for i in h if i==0]!=[0]:
+       m[:,0]+=numpy.where(m[:,0]==numpy.nanmax(m[[i for i,j in enumerate(h) if j==0],0]),0,numpy.nan)
+       m[[i for i,j in enumerate(h) if j==0]]+=[0 if i==0 or j==0 else numpy.nan for i,j in enumerate(h)]
+       h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     u="# text = "+text+"\n"
+     v=[(s,e) for s,e in w["offset_mapping"] if s<e]
+     for i,(s,e) in enumerate(v,1):
+       q=self.model.config.id2label[p[i,h[i]]].split("|")
+       u+="\t".join([str(i),text[s:e],"_",q[0],"|".join(q[1:-1]),"_",str(h[i]),q[-1],"_","_" if i<len(v) and e<v[i][0] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+
+ nlp=UDgoeswith("KoichiYasuoka/roberta-base-ainu-ud-goeswith")
+ print(nlp("itak=as awa pon rupne aynu ene itaki"))
+ ```
+
+ with [ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/).
+ Or without ufal.chu-liu-edmonds:
+
+ ```py
+ from transformers import pipeline
+ nlp=pipeline("universal-dependencies","KoichiYasuoka/roberta-base-ainu-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple")
+ print(nlp("itak=as awa pon rupne aynu ene itaki"))
+ ```
+
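Either snippet returns one CoNLL-U string per call. A small helper sketch for turning that string into plain tuples (the `read_conllu` name is only illustrative, and `nlp` is assumed to come from one of the snippets above):

```py
def read_conllu(conllu):
  # Keep only the 10-column token lines; comments and n-m range lines are skipped.
  rows = []
  for line in conllu.splitlines():
    t = line.split("\t")
    if len(t) == 10 and t[0].isdecimal():
      rows.append((int(t[0]), t[1], t[3], int(t[6]), t[7]))  # ID, FORM, UPOS, HEAD, DEPREL
  return rows

for tok in read_conllu(nlp("itak=as awa pon rupne aynu ene itaki")):
  print(tok)
```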
config.json ADDED
@@ -0,0 +1,279 @@
+ {
+   "_name_or_path": "KoichiYasuoka/roberta-base-ainu-upos",
+   "architectures": [
+     "RobertaForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "custom_pipelines": {
+     "universal-dependencies": {
+       "impl": "ud.UniversalDependenciesPipeline"
+     }
+   },
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "-|_|dep",
+     "1": "ADP|\u526f\u52a9\u8a5e|case",
+     "2": "ADP|\u526f\u52a9\u8a5e|mark",
+     "3": "ADP|\u526f\u52a9\u8a5e|root",
+     "4": "ADP|\u5f8c\u7f6e\u526f\u8a5e|root",
+     "5": "ADP|\u683c\u52a9\u8a5e|case",
+     "6": "ADP|\u683c\u52a9\u8a5e|mark",
+     "7": "ADV|\u526f\u8a5e|acl",
+     "8": "ADV|\u526f\u8a5e|advcl",
+     "9": "ADV|\u526f\u8a5e|advmod",
+     "10": "ADV|\u526f\u8a5e|amod",
+     "11": "ADV|\u526f\u8a5e|conj",
+     "12": "ADV|\u526f\u8a5e|parataxis",
+     "13": "ADV|\u526f\u8a5e|root",
+     "14": "AUX|\u30c7\u30a2\u30eb\u52d5\u8a5e|cop",
+     "15": "AUX|\u52a9\u52d5\u8a5e|aux",
+     "16": "AUX|\u52a9\u52d5\u8a5e|case",
+     "17": "CCONJ|\u63a5\u7d9a\u52a9\u8a5e|cc",
+     "18": "CCONJ|\u63a5\u7d9a\u52a9\u8a5e|root",
+     "19": "CCONJ|\u63a5\u7d9a\u8a5e|cc",
+     "20": "CCONJ|\u63a5\u7d9a\u8a5e|mark",
+     "21": "DET|\u9023\u4f53\u8a5e|acl",
+     "22": "DET|\u9023\u4f53\u8a5e|det",
+     "23": "DET|\u9023\u4f53\u8a5e|parataxis",
+     "24": "DET|\u9023\u4f53\u8a5e|root",
+     "25": "INTJ|\u9593\u6295\u8a5e|conj",
+     "26": "INTJ|\u9593\u6295\u8a5e|discource",
+     "27": "INTJ|\u9593\u6295\u8a5e|discourse",
+     "28": "INTJ|\u9593\u6295\u8a5e|parataxis",
+     "29": "INTJ|\u9593\u6295\u8a5e|root",
+     "30": "NOUN|\u4ee3\u540d\u8a5e|nsubj",
+     "31": "NOUN|\u4ee3\u540d\u8a5e|parataxis",
+     "32": "NOUN|\u4ee3\u540d\u8a5e|root",
+     "33": "NOUN|\u4f4d\u7f6e\u540d\u8a5e|conj",
+     "34": "NOUN|\u4f4d\u7f6e\u540d\u8a5e|iobj",
+     "35": "NOUN|\u4f4d\u7f6e\u540d\u8a5e|nmod",
+     "36": "NOUN|\u4f4d\u7f6e\u540d\u8a5e|nsubj",
+     "37": "NOUN|\u4f4d\u7f6e\u540d\u8a5e|obj",
+     "38": "NOUN|\u4f4d\u7f6e\u540d\u8a5e|obl",
+     "39": "NOUN|\u4f4d\u7f6e\u540d\u8a5e|parataxis",
+     "40": "NOUN|\u4f4d\u7f6e\u540d\u8a5e|root",
+     "41": "NOUN|\u540d\u8a5e|acl",
+     "42": "NOUN|\u540d\u8a5e|advcl",
+     "43": "NOUN|\u540d\u8a5e|appos",
+     "44": "NOUN|\u540d\u8a5e|compound",
+     "45": "NOUN|\u540d\u8a5e|conj",
+     "46": "NOUN|\u540d\u8a5e|iobj",
+     "47": "NOUN|\u540d\u8a5e|nmod",
+     "48": "NOUN|\u540d\u8a5e|nsubj",
+     "49": "NOUN|\u540d\u8a5e|obj",
+     "50": "NOUN|\u540d\u8a5e|obl",
+     "51": "NOUN|\u540d\u8a5e|parataxis",
+     "52": "NOUN|\u540d\u8a5e|root",
+     "53": "NOUN|\u540d\u8a5e|vocative",
+     "54": "NOUN|\u5f62\u5f0f\u540d\u8a5e|advcl",
+     "55": "NOUN|\u5f62\u5f0f\u540d\u8a5e|conj",
+     "56": "NOUN|\u5f62\u5f0f\u540d\u8a5e|nmod",
+     "57": "NOUN|\u5f62\u5f0f\u540d\u8a5e|nsubj",
+     "58": "NOUN|\u5f62\u5f0f\u540d\u8a5e|obj",
+     "59": "NOUN|\u5f62\u5f0f\u540d\u8a5e|obl",
+     "60": "NOUN|\u5f62\u5f0f\u540d\u8a5e|parataxis",
+     "61": "NOUN|\u5f62\u5f0f\u540d\u8a5e|root",
+     "62": "NUM|\u6570\u8a5e|conj",
+     "63": "NUM|\u6570\u8a5e|nummod",
+     "64": "NUM|\u6570\u8a5e|root",
+     "65": "PART|\u4eba\u79f0\u63a5\u8f9e|det",
+     "66": "PART|\u4eba\u79f0\u63a5\u8f9e|expl",
+     "67": "PART|\u4eba\u79f0\u63a5\u8f9e|iobj",
+     "68": "PART|\u4eba\u79f0\u63a5\u8f9e|nsubj",
+     "69": "PART|\u4eba\u79f0\u63a5\u8f9e|obj",
+     "70": "PART|\u4eba\u79f0\u63a5\u8f9e|parataxis",
+     "71": "PART|\u4eba\u79f0\u63a5\u8f9e|root",
+     "72": "PART|\u63a5\u5c3e\u8f9e|conj",
+     "73": "PART|\u63a5\u5c3e\u8f9e|fixed",
+     "74": "PART|\u63a5\u5c3e\u8f9e|obl",
+     "75": "PART|\u63a5\u5c3e\u8f9e|root",
+     "76": "PART|\u63a5\u982d\u8f9e|compound",
+     "77": "PART|\u63a5\u982d\u8f9e|det",
+     "78": "PART|\u63a5\u982d\u8f9e|fixed",
+     "79": "PART|\u63a5\u982d\u8f9e|iobj",
+     "80": "PART|\u63a5\u982d\u8f9e|parataxis",
+     "81": "PART|\u7d42\u52a9\u8a5e|discourse",
+     "82": "PART|\u7d42\u52a9\u8a5e|mark",
+     "83": "PART|\u7d42\u52a9\u8a5e|root",
+     "84": "PRON|\u4ee3\u540d\u8a5e|compound",
+     "85": "PRON|\u4ee3\u540d\u8a5e|nsubj",
+     "86": "PRON|\u4ee3\u540d\u8a5e|root",
+     "87": "PROPN|\u56fa\u6709\u540d\u8a5e|nmod",
+     "88": "PROPN|\u56fa\u6709\u540d\u8a5e|nsubj",
+     "89": "PROPN|\u56fa\u6709\u540d\u8a5e|root",
+     "90": "PUNCT|\u8a18\u53f7|punct",
+     "91": "SCONJ|\u5f8c\u7f6e\u526f\u8a5e|case",
+     "92": "SCONJ|\u5f8c\u7f6e\u526f\u8a5e|parataxis",
+     "93": "SCONJ|\u5f8c\u7f6e\u526f\u8a5e|root",
+     "94": "SCONJ|\u63a5\u7d9a\u52a9\u8a5e|advmod",
+     "95": "SCONJ|\u63a5\u7d9a\u52a9\u8a5e|case",
+     "96": "SCONJ|\u63a5\u7d9a\u52a9\u8a5e|cc",
+     "97": "SCONJ|\u63a5\u7d9a\u52a9\u8a5e|mark",
+     "98": "SCONJ|\u63a5\u7d9a\u52a9\u8a5e|root",
+     "99": "SCONJ|\u63a5\u7d9a\u8a5e|case",
+     "100": "SCONJ|\u63a5\u7d9a\u8a5e|mark",
+     "101": "VERB|\u4ed6\u52d5\u8a5e|acl",
+     "102": "VERB|\u4ed6\u52d5\u8a5e|advcl",
+     "103": "VERB|\u4ed6\u52d5\u8a5e|amod",
+     "104": "VERB|\u4ed6\u52d5\u8a5e|ccomp",
+     "105": "VERB|\u4ed6\u52d5\u8a5e|conj",
+     "106": "VERB|\u4ed6\u52d5\u8a5e|parataxis",
+     "107": "VERB|\u4ed6\u52d5\u8a5e|root",
+     "108": "VERB|\u4ed6\u52d5\u8a5e\uff1f|root",
+     "109": "VERB|\u5b8c\u5168\u52d5\u8a5e|acl",
+     "110": "VERB|\u5b8c\u5168\u52d5\u8a5e|advcl",
+     "111": "VERB|\u5b8c\u5168\u52d5\u8a5e|parataxis",
+     "112": "VERB|\u5b8c\u5168\u52d5\u8a5e|root",
+     "113": "VERB|\u81ea\u52d5\u8a5e|acl",
+     "114": "VERB|\u81ea\u52d5\u8a5e|advcl",
+     "115": "VERB|\u81ea\u52d5\u8a5e|amod",
+     "116": "VERB|\u81ea\u52d5\u8a5e|ccomp",
+     "117": "VERB|\u81ea\u52d5\u8a5e|conj",
+     "118": "VERB|\u81ea\u52d5\u8a5e|parataxis",
+     "119": "VERB|\u81ea\u52d5\u8a5e|root",
+     "120": "X|_|goeswith"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "-|_|dep": 0,
+     "ADP|\u526f\u52a9\u8a5e|case": 1,
+     "ADP|\u526f\u52a9\u8a5e|mark": 2,
+     "ADP|\u526f\u52a9\u8a5e|root": 3,
+     "ADP|\u5f8c\u7f6e\u526f\u8a5e|root": 4,
+     "ADP|\u683c\u52a9\u8a5e|case": 5,
+     "ADP|\u683c\u52a9\u8a5e|mark": 6,
+     "ADV|\u526f\u8a5e|acl": 7,
+     "ADV|\u526f\u8a5e|advcl": 8,
+     "ADV|\u526f\u8a5e|advmod": 9,
+     "ADV|\u526f\u8a5e|amod": 10,
+     "ADV|\u526f\u8a5e|conj": 11,
+     "ADV|\u526f\u8a5e|parataxis": 12,
+     "ADV|\u526f\u8a5e|root": 13,
+     "AUX|\u30c7\u30a2\u30eb\u52d5\u8a5e|cop": 14,
+     "AUX|\u52a9\u52d5\u8a5e|aux": 15,
+     "AUX|\u52a9\u52d5\u8a5e|case": 16,
+     "CCONJ|\u63a5\u7d9a\u52a9\u8a5e|cc": 17,
+     "CCONJ|\u63a5\u7d9a\u52a9\u8a5e|root": 18,
+     "CCONJ|\u63a5\u7d9a\u8a5e|cc": 19,
+     "CCONJ|\u63a5\u7d9a\u8a5e|mark": 20,
+     "DET|\u9023\u4f53\u8a5e|acl": 21,
+     "DET|\u9023\u4f53\u8a5e|det": 22,
+     "DET|\u9023\u4f53\u8a5e|parataxis": 23,
+     "DET|\u9023\u4f53\u8a5e|root": 24,
+     "INTJ|\u9593\u6295\u8a5e|conj": 25,
+     "INTJ|\u9593\u6295\u8a5e|discource": 26,
+     "INTJ|\u9593\u6295\u8a5e|discourse": 27,
+     "INTJ|\u9593\u6295\u8a5e|parataxis": 28,
+     "INTJ|\u9593\u6295\u8a5e|root": 29,
+     "NOUN|\u4ee3\u540d\u8a5e|nsubj": 30,
+     "NOUN|\u4ee3\u540d\u8a5e|parataxis": 31,
+     "NOUN|\u4ee3\u540d\u8a5e|root": 32,
+     "NOUN|\u4f4d\u7f6e\u540d\u8a5e|conj": 33,
+     "NOUN|\u4f4d\u7f6e\u540d\u8a5e|iobj": 34,
+     "NOUN|\u4f4d\u7f6e\u540d\u8a5e|nmod": 35,
+     "NOUN|\u4f4d\u7f6e\u540d\u8a5e|nsubj": 36,
+     "NOUN|\u4f4d\u7f6e\u540d\u8a5e|obj": 37,
+     "NOUN|\u4f4d\u7f6e\u540d\u8a5e|obl": 38,
+     "NOUN|\u4f4d\u7f6e\u540d\u8a5e|parataxis": 39,
+     "NOUN|\u4f4d\u7f6e\u540d\u8a5e|root": 40,
+     "NOUN|\u540d\u8a5e|acl": 41,
+     "NOUN|\u540d\u8a5e|advcl": 42,
+     "NOUN|\u540d\u8a5e|appos": 43,
+     "NOUN|\u540d\u8a5e|compound": 44,
+     "NOUN|\u540d\u8a5e|conj": 45,
+     "NOUN|\u540d\u8a5e|iobj": 46,
+     "NOUN|\u540d\u8a5e|nmod": 47,
+     "NOUN|\u540d\u8a5e|nsubj": 48,
+     "NOUN|\u540d\u8a5e|obj": 49,
+     "NOUN|\u540d\u8a5e|obl": 50,
+     "NOUN|\u540d\u8a5e|parataxis": 51,
+     "NOUN|\u540d\u8a5e|root": 52,
+     "NOUN|\u540d\u8a5e|vocative": 53,
+     "NOUN|\u5f62\u5f0f\u540d\u8a5e|advcl": 54,
+     "NOUN|\u5f62\u5f0f\u540d\u8a5e|conj": 55,
+     "NOUN|\u5f62\u5f0f\u540d\u8a5e|nmod": 56,
+     "NOUN|\u5f62\u5f0f\u540d\u8a5e|nsubj": 57,
+     "NOUN|\u5f62\u5f0f\u540d\u8a5e|obj": 58,
+     "NOUN|\u5f62\u5f0f\u540d\u8a5e|obl": 59,
+     "NOUN|\u5f62\u5f0f\u540d\u8a5e|parataxis": 60,
+     "NOUN|\u5f62\u5f0f\u540d\u8a5e|root": 61,
+     "NUM|\u6570\u8a5e|conj": 62,
+     "NUM|\u6570\u8a5e|nummod": 63,
+     "NUM|\u6570\u8a5e|root": 64,
+     "PART|\u4eba\u79f0\u63a5\u8f9e|det": 65,
+     "PART|\u4eba\u79f0\u63a5\u8f9e|expl": 66,
+     "PART|\u4eba\u79f0\u63a5\u8f9e|iobj": 67,
+     "PART|\u4eba\u79f0\u63a5\u8f9e|nsubj": 68,
+     "PART|\u4eba\u79f0\u63a5\u8f9e|obj": 69,
+     "PART|\u4eba\u79f0\u63a5\u8f9e|parataxis": 70,
+     "PART|\u4eba\u79f0\u63a5\u8f9e|root": 71,
+     "PART|\u63a5\u5c3e\u8f9e|conj": 72,
+     "PART|\u63a5\u5c3e\u8f9e|fixed": 73,
+     "PART|\u63a5\u5c3e\u8f9e|obl": 74,
+     "PART|\u63a5\u5c3e\u8f9e|root": 75,
+     "PART|\u63a5\u982d\u8f9e|compound": 76,
+     "PART|\u63a5\u982d\u8f9e|det": 77,
+     "PART|\u63a5\u982d\u8f9e|fixed": 78,
+     "PART|\u63a5\u982d\u8f9e|iobj": 79,
+     "PART|\u63a5\u982d\u8f9e|parataxis": 80,
+     "PART|\u7d42\u52a9\u8a5e|discourse": 81,
+     "PART|\u7d42\u52a9\u8a5e|mark": 82,
+     "PART|\u7d42\u52a9\u8a5e|root": 83,
+     "PRON|\u4ee3\u540d\u8a5e|compound": 84,
+     "PRON|\u4ee3\u540d\u8a5e|nsubj": 85,
+     "PRON|\u4ee3\u540d\u8a5e|root": 86,
+     "PROPN|\u56fa\u6709\u540d\u8a5e|nmod": 87,
+     "PROPN|\u56fa\u6709\u540d\u8a5e|nsubj": 88,
+     "PROPN|\u56fa\u6709\u540d\u8a5e|root": 89,
+     "PUNCT|\u8a18\u53f7|punct": 90,
+     "SCONJ|\u5f8c\u7f6e\u526f\u8a5e|case": 91,
+     "SCONJ|\u5f8c\u7f6e\u526f\u8a5e|parataxis": 92,
+     "SCONJ|\u5f8c\u7f6e\u526f\u8a5e|root": 93,
+     "SCONJ|\u63a5\u7d9a\u52a9\u8a5e|advmod": 94,
+     "SCONJ|\u63a5\u7d9a\u52a9\u8a5e|case": 95,
+     "SCONJ|\u63a5\u7d9a\u52a9\u8a5e|cc": 96,
+     "SCONJ|\u63a5\u7d9a\u52a9\u8a5e|mark": 97,
+     "SCONJ|\u63a5\u7d9a\u52a9\u8a5e|root": 98,
+     "SCONJ|\u63a5\u7d9a\u8a5e|case": 99,
+     "SCONJ|\u63a5\u7d9a\u8a5e|mark": 100,
+     "VERB|\u4ed6\u52d5\u8a5e|acl": 101,
+     "VERB|\u4ed6\u52d5\u8a5e|advcl": 102,
+     "VERB|\u4ed6\u52d5\u8a5e|amod": 103,
+     "VERB|\u4ed6\u52d5\u8a5e|ccomp": 104,
+     "VERB|\u4ed6\u52d5\u8a5e|conj": 105,
+     "VERB|\u4ed6\u52d5\u8a5e|parataxis": 106,
+     "VERB|\u4ed6\u52d5\u8a5e|root": 107,
+     "VERB|\u4ed6\u52d5\u8a5e\uff1f|root": 108,
+     "VERB|\u5b8c\u5168\u52d5\u8a5e|acl": 109,
+     "VERB|\u5b8c\u5168\u52d5\u8a5e|advcl": 110,
+     "VERB|\u5b8c\u5168\u52d5\u8a5e|parataxis": 111,
+     "VERB|\u5b8c\u5168\u52d5\u8a5e|root": 112,
+     "VERB|\u81ea\u52d5\u8a5e|acl": 113,
+     "VERB|\u81ea\u52d5\u8a5e|advcl": 114,
+     "VERB|\u81ea\u52d5\u8a5e|amod": 115,
+     "VERB|\u81ea\u52d5\u8a5e|ccomp": 116,
+     "VERB|\u81ea\u52d5\u8a5e|conj": 117,
+     "VERB|\u81ea\u52d5\u8a5e|parataxis": 118,
+     "VERB|\u81ea\u52d5\u8a5e|root": 119,
+     "X|_|goeswith": 120
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "tokenizer_class": "RemBertTokenizerFast",
+   "torch_dtype": "float32",
+   "transformers_version": "4.22.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 6143
+ }
maker.py ADDED
@@ -0,0 +1,50 @@
+ #! /usr/bin/python3
+ src="KoichiYasuoka/roberta-base-ainu-upos"
+ tgt="KoichiYasuoka/roberta-base-ainu-ud-goeswith"
+ import os
+ url="https://github.com/KoichiYasuoka/UD-Ainu"
+ d=os.path.basename(url)
+ os.system("test -d {} || git clone --depth=1 {}".format(d,url))
+ s='{if($0==""){if(u~/\\t0\\troot\\t/)print u;u=""}else u=u$0"\\n"}'
+ os.system("nawk -F'\\t' '{}' {}/ain_*-ud-*.conllu > train.conllu".format(s,d))
+ class UDgoeswithDataset(object):
+   def __init__(self,conllu,tokenizer):
+     self.ids,self.tags,label=[],[],set()
+     with open(conllu,"r",encoding="utf-8") as r:
+       cls,sep,msk=tokenizer.cls_token_id,tokenizer.sep_token_id,tokenizer.mask_token_id
+       dep,c="-|_|dep",[]
+       for s in r:
+         t=s.split("\t")
+         if len(t)==10 and t[0].isdecimal():
+           c.append(t)
+         elif c!=[]:
+           for x in [1,2]:
+             d=list(c)
+             v=tokenizer([t[x] for t in d],add_special_tokens=False)["input_ids"]
+             for i in range(len(v)-1,-1,-1):
+               for j in range(1,len(v[i])):
+                 d.insert(i+1,[d[i][0],"_","_","X","_","_",d[i][0],"goeswith","_","_"])
+             y=["0"]+[t[0] for t in d]
+             h=[i if t[6]=="0" else y.index(t[6]) for i,t in enumerate(d,1)]
+             p,v=[t[3]+"|"+t[4]+"|"+t[7] for t in d],sum(v,[])
+             if len(v)<tokenizer.model_max_length-3:
+               self.ids.append([cls]+v+[sep])
+               self.tags.append([dep]+p+[dep])
+               label=set(sum([self.tags[-1],list(label)],[]))
+               for i,k in enumerate(v):
+                 self.ids.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
+                 self.tags.append([dep]+[t if h[j]==i+1 else dep for j,t in enumerate(p)]+[dep,dep])
+           c=[]
+     self.label2id={l:i for i,l in enumerate(sorted(label))}
+   __len__=lambda self:len(self.ids)
+   __getitem__=lambda self,i:{"input_ids":self.ids[i],"labels":[self.label2id[t] for t in self.tags[i]]}
+ from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
+ tkz=AutoTokenizer.from_pretrained(src)
+ trainDS=UDgoeswithDataset("train.conllu",tkz)
+ lid=trainDS.label2id
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=64,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1)
+ trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True),train_dataset=trainDS)
+ trn.train()
+ trn.save_model(tgt)
+ tkz.save_pretrained(tgt)
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:110fb430bee6a70daf2ba7bbaa2531f9228bc84f50149e4cc85bd0f4dc2503a3
+ size 361117489
sentencepiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
+ size 1
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "eos_token": "[SEP]",
+   "keep_accents": false,
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "remove_space": true,
+   "sep_token": "[SEP]",
+   "split_by_punct": true,
+   "tokenizer_class": "RemBertTokenizerFast",
+   "unk_token": "[UNK]"
+ }
ud.py ADDED
@@ -0,0 +1,72 @@
+ from transformers import TokenClassificationPipeline
+
+ class UniversalDependenciesPipeline(TokenClassificationPipeline):
+   def _forward(self,model_inputs):
+     import torch
+     v=model_inputs["input_ids"][0].tolist()
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)],device=self.device))
+     return {"logits":e.logits[:,1:-2,:],**model_inputs}
+   def postprocess(self,model_outputs,**kwargs):
+     import numpy
+     e=model_outputs["logits"].numpy()
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     m,p=numpy.nanmax(e,axis=2),numpy.nanargmax(e,axis=2)
+     h=self.chu_liu_edmonds(m)
+     z=[i for i,j in enumerate(h) if i==j]
+     if len(z)>1:
+       k,h=z[numpy.nanargmax(m[z,z])],numpy.nanmin(m)-numpy.nanmax(m)
+       m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+       h=self.chu_liu_edmonds(m)
+     v=[(s,e,c) for (s,e),c in zip(model_outputs["offset_mapping"][0].tolist(),self.tokenizer.convert_ids_to_tokens(model_outputs["input_ids"][0].tolist())) if s<e]
+     q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
+     if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
+       for i,j in reversed(list(enumerate(q[1:],1))):
+         if j[-1]=="goeswith" and set([t[-1] for t in q[h[i]+1:i+1]])=={"goeswith"}:
+           h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+           s,e,c=v.pop(i)
+           v[i-1]=(v[i-1][0],e,v[i-1][2]+c)
+           q.pop(i)
+     t=model_outputs["sentence"].replace("\n"," ")
+     u="\n"
+     z={"a":"ァ","i":"ィ","u":"ゥ","e":"ェ","o":"ォ","k":"ㇰ","s":"ㇱ","t":"ㇳ","n":"ㇴ","h":"ㇷ","m":"ㇺ","r":"ㇽ","p":"ㇷ゚"}
+     f=-1
+     for i,(s,e,c) in reversed(list(enumerate(v))):
+       w,x=[j for j in t[s:e]],""
+       if i>0 and s<v[i-1][1]:
+         w[0]=z[c[0]] if c[0] in z else "ッ"
+         f=max(f,i)
+       elif f>0:
+         x="{}-{}\t{}\t_\t_\t_\t_\t_\t_\t_\t{}\n".format(i+1,f+1,t[s:v[f][1]],"_" if f+1<len(v) and v[f][1]<v[f+1][0] else "SpaceAfter=No")
+         f=-1
+       if i+1<len(v) and e>v[i+1][0]:
+         w[-1]=z[c[-1]] if c[-1] in z else "ッ"
+       u=x+"\t".join([str(i+1),"".join(w),"_",q[i][0],"|".join(q[i][1:-1]),"_",str(0 if h[i]==i else h[i]+1),q[i][-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"+u
+     return "# text = "+t+"\n"+u
+   def chu_liu_edmonds(self,matrix):
+     import numpy
+     h=numpy.nanargmax(matrix,axis=0)
+     x=[-1 if i==j else j for i,j in enumerate(h)]
+     for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+       y=[]
+       while x!=y:
+         y=list(x)
+         for i,j in enumerate(x):
+           x[i]=b(x,i,j)
+       if max(x)<0:
+         return h
+     y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+     z=matrix-numpy.nanmax(matrix,axis=0)
+     m=numpy.block([[z[x,:][:,x],numpy.nanmax(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.nanmax(z[y,:][:,x],axis=0),numpy.nanmax(z[y,y])]])
+     k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.nanargmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+     h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+     i=y[numpy.nanargmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+     h[i]=x[k[-1]] if k[-1]<len(x) else i
+     return h