KoichiYasuoka committed on
Commit 6e7ef7c
1 Parent(s): e125dd6

initial release

Files changed (10)
  1. README.md +75 -0
  2. added_tokens.json +7 -0
  3. config.json +228 -0
  4. maker.py +54 -0
  5. pytorch_model.bin +3 -0
  6. special_tokens_map.json +51 -0
  7. spm.model +3 -0
  8. tokenizer.json +0 -0
  9. tokenizer_config.json +58 -0
  10. ud.py +62 -0
README.md ADDED
@@ -0,0 +1,75 @@
+ ---
+ language:
+ - "ja"
+ tags:
+ - "japanese"
+ - "pos"
+ - "dependency-parsing"
+ datasets:
+ - "universal_dependencies"
+ license: "apache-2.0"
+ pipeline_tag: "token-classification"
+ widget:
+ - text: "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"
+ ---
+
+ # deberta-v3-base-japanese-ud-goeswith
+
+ ## Model Description
+
+ This is a DeBERTa(V3) model pretrained on the LLM-jp corpus v1.0 and fine-tuned for POS-tagging and dependency-parsing (using `goeswith` for subwords), derived from [deberta-v3-base-japanese](https://huggingface.co/ku-nlp/deberta-v3-base-japanese) and [UD_Japanese-GSDLUW](https://github.com/UniversalDependencies/UD_Japanese-GSDLUW). Every token is classified into a `UPOS|FEATS|DEPREL` label such as `NOUN|_|nsubj`, and non-initial subwords are tagged `X|_|goeswith`.
+
+ ## How to Use
+
+ ```py
+ class UDgoeswith(object):
+   def __init__(self,bert):
+     from transformers import AutoTokenizer,AutoModelForTokenClassification
+     self.tokenizer=AutoTokenizer.from_pretrained(bert)
+     self.model=AutoModelForTokenClassification.from_pretrained(bert)
+   def __call__(self,text):
+     import numpy,torch,ufal.chu_liu_edmonds
+     w=self.tokenizer(text,return_offsets_mapping=True)
+     v=w["input_ids"]
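+     # one probe per token: mask the token and append its id at the end; the logits then score,
+     # for every position, the UPOS|FEATS|DEPREL it would take with the masked token as its head
+     # (the masked position itself scores the "|root" labels)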
+     x=[v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor(x)).logits.numpy()[:,1:-2,:]
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     m=numpy.full((e.shape[0]+1,e.shape[1]+1),numpy.nan)
+     m[1:,1:]=numpy.nanmax(e,axis=2).transpose()
+     p=numpy.zeros(m.shape)
+     p[1:,1:]=numpy.nanargmax(e,axis=2).transpose()
+     for i in range(1,m.shape[0]):
+       m[i,0],m[i,i],p[i,0]=m[i,i],numpy.nan,p[i,i]
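+     # decode a dependency tree with Chu-Liu-Edmonds; if more than one token attaches to the
+     # artificial root, keep only the best-scoring root and decode again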
+     h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     if [0 for i in h if i==0]!=[0]:
+       m[:,0]+=numpy.where(m[:,0]==numpy.nanmax(m[[i for i,j in enumerate(h) if j==0],0]),0,numpy.nan)
+       m[[i for i,j in enumerate(h) if j==0]]+=[0 if i==0 or j==0 else numpy.nan for i,j in enumerate(h)]
+       h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
+     u="# text = "+text+"\n"
+     v=[(s,e) for s,e in w["offset_mapping"] if s<e]
+     for i,(s,e) in enumerate(v,1):
+       q=self.model.config.id2label[p[i,h[i]]].split("|")
+       u+="\t".join([str(i),text[s:e],"_",q[0],"_","|".join(q[1:-1]),str(h[i]),q[-1],"_","_" if i<len(v) and e<v[i][0] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+
+ nlp=UDgoeswith("KoichiYasuoka/deberta-v3-base-japanese-ud-goeswith")
+ print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
+ ```
+
+ The example above requires [ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/).
+ Or, without ufal.chu-liu-edmonds:
+
+ ```py
+ from transformers import pipeline
+ nlp=pipeline("universal-dependencies","KoichiYasuoka/deberta-v3-base-japanese-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple")
+ print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
+ ```
+
added_tokens.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "[CLS]": 96871,
+   "[MASK]": 96867,
+   "[PAD]": 96869,
+   "[SEP]": 96868,
+   "[UNK]": 96870
+ }
config.json ADDED
@@ -0,0 +1,228 @@
+ {
+   "architectures": [
+     "DebertaV2ForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "custom_pipelines": {
+     "universal-dependencies": {
+       "impl": "ud.UniversalDependenciesPipeline"
+     }
+   },
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "-|_|dep",
+     "1": "ADJ|_|acl",
+     "2": "ADJ|_|advcl",
+     "3": "ADJ|_|amod",
+     "4": "ADJ|_|ccomp",
+     "5": "ADJ|_|csubj",
+     "6": "ADJ|_|csubj:outer",
+     "7": "ADJ|_|dep",
+     "8": "ADJ|_|nmod",
+     "9": "ADJ|_|nsubj",
+     "10": "ADJ|_|obj",
+     "11": "ADJ|_|obl",
+     "12": "ADJ|_|root",
+     "13": "ADP|_|case",
+     "14": "ADP|_|fixed",
+     "15": "ADV|_|advcl",
+     "16": "ADV|_|advmod",
+     "17": "ADV|_|dep",
+     "18": "ADV|_|obj",
+     "19": "ADV|_|root",
+     "20": "AUX|Polarity=Neg|aux",
+     "21": "AUX|Polarity=Neg|fixed",
+     "22": "AUX|_|aux",
+     "23": "AUX|_|cop",
+     "24": "AUX|_|fixed",
+     "25": "AUX|_|root",
+     "26": "CCONJ|_|cc",
+     "27": "DET|_|det",
+     "28": "INTJ|_|discourse",
+     "29": "INTJ|_|root",
+     "30": "NOUN|Polarity=Neg|obl",
+     "31": "NOUN|Polarity=Neg|root",
+     "32": "NOUN|_|acl",
+     "33": "NOUN|_|advcl",
+     "34": "NOUN|_|ccomp",
+     "35": "NOUN|_|compound",
+     "36": "NOUN|_|csubj",
+     "37": "NOUN|_|csubj:outer",
+     "38": "NOUN|_|nmod",
+     "39": "NOUN|_|nsubj",
+     "40": "NOUN|_|nsubj:outer",
+     "41": "NOUN|_|obj",
+     "42": "NOUN|_|obl",
+     "43": "NOUN|_|root",
+     "44": "NUM|_|advcl",
+     "45": "NUM|_|compound",
+     "46": "NUM|_|nmod",
+     "47": "NUM|_|nsubj",
+     "48": "NUM|_|nsubj:outer",
+     "49": "NUM|_|nummod",
+     "50": "NUM|_|obj",
+     "51": "NUM|_|obl",
+     "52": "NUM|_|root",
+     "53": "PART|_|mark",
+     "54": "PRON|_|acl",
+     "55": "PRON|_|advcl",
+     "56": "PRON|_|nmod",
+     "57": "PRON|_|nsubj",
+     "58": "PRON|_|nsubj:outer",
+     "59": "PRON|_|obj",
+     "60": "PRON|_|obl",
+     "61": "PRON|_|root",
+     "62": "PROPN|_|acl",
+     "63": "PROPN|_|advcl",
+     "64": "PROPN|_|compound",
+     "65": "PROPN|_|nmod",
+     "66": "PROPN|_|nsubj",
+     "67": "PROPN|_|nsubj:outer",
+     "68": "PROPN|_|obj",
+     "69": "PROPN|_|obl",
+     "70": "PROPN|_|root",
+     "71": "PUNCT|_|punct",
+     "72": "SCONJ|_|dep",
+     "73": "SCONJ|_|fixed",
+     "74": "SCONJ|_|mark",
+     "75": "SYM|_|compound",
+     "76": "SYM|_|dep",
+     "77": "SYM|_|nmod",
+     "78": "SYM|_|obl",
+     "79": "VERB|_|acl",
+     "80": "VERB|_|advcl",
+     "81": "VERB|_|ccomp",
+     "82": "VERB|_|compound",
+     "83": "VERB|_|csubj",
+     "84": "VERB|_|csubj:outer",
+     "85": "VERB|_|nmod",
+     "86": "VERB|_|obj",
+     "87": "VERB|_|obl",
+     "88": "VERB|_|root",
+     "89": "X|_|dep",
+     "90": "X|_|goeswith",
+     "91": "X|_|nmod"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "-|_|dep": 0,
+     "ADJ|_|acl": 1,
+     "ADJ|_|advcl": 2,
+     "ADJ|_|amod": 3,
+     "ADJ|_|ccomp": 4,
+     "ADJ|_|csubj": 5,
+     "ADJ|_|csubj:outer": 6,
+     "ADJ|_|dep": 7,
+     "ADJ|_|nmod": 8,
+     "ADJ|_|nsubj": 9,
+     "ADJ|_|obj": 10,
+     "ADJ|_|obl": 11,
+     "ADJ|_|root": 12,
+     "ADP|_|case": 13,
+     "ADP|_|fixed": 14,
+     "ADV|_|advcl": 15,
+     "ADV|_|advmod": 16,
+     "ADV|_|dep": 17,
+     "ADV|_|obj": 18,
+     "ADV|_|root": 19,
+     "AUX|Polarity=Neg|aux": 20,
+     "AUX|Polarity=Neg|fixed": 21,
+     "AUX|_|aux": 22,
+     "AUX|_|cop": 23,
+     "AUX|_|fixed": 24,
+     "AUX|_|root": 25,
+     "CCONJ|_|cc": 26,
+     "DET|_|det": 27,
+     "INTJ|_|discourse": 28,
+     "INTJ|_|root": 29,
+     "NOUN|Polarity=Neg|obl": 30,
+     "NOUN|Polarity=Neg|root": 31,
+     "NOUN|_|acl": 32,
+     "NOUN|_|advcl": 33,
+     "NOUN|_|ccomp": 34,
+     "NOUN|_|compound": 35,
+     "NOUN|_|csubj": 36,
+     "NOUN|_|csubj:outer": 37,
+     "NOUN|_|nmod": 38,
+     "NOUN|_|nsubj": 39,
+     "NOUN|_|nsubj:outer": 40,
+     "NOUN|_|obj": 41,
+     "NOUN|_|obl": 42,
+     "NOUN|_|root": 43,
+     "NUM|_|advcl": 44,
+     "NUM|_|compound": 45,
+     "NUM|_|nmod": 46,
+     "NUM|_|nsubj": 47,
+     "NUM|_|nsubj:outer": 48,
+     "NUM|_|nummod": 49,
+     "NUM|_|obj": 50,
+     "NUM|_|obl": 51,
+     "NUM|_|root": 52,
+     "PART|_|mark": 53,
+     "PRON|_|acl": 54,
+     "PRON|_|advcl": 55,
+     "PRON|_|nmod": 56,
+     "PRON|_|nsubj": 57,
+     "PRON|_|nsubj:outer": 58,
+     "PRON|_|obj": 59,
+     "PRON|_|obl": 60,
+     "PRON|_|root": 61,
+     "PROPN|_|acl": 62,
+     "PROPN|_|advcl": 63,
+     "PROPN|_|compound": 64,
+     "PROPN|_|nmod": 65,
+     "PROPN|_|nsubj": 66,
+     "PROPN|_|nsubj:outer": 67,
+     "PROPN|_|obj": 68,
+     "PROPN|_|obl": 69,
+     "PROPN|_|root": 70,
+     "PUNCT|_|punct": 71,
+     "SCONJ|_|dep": 72,
+     "SCONJ|_|fixed": 73,
+     "SCONJ|_|mark": 74,
+     "SYM|_|compound": 75,
+     "SYM|_|dep": 76,
+     "SYM|_|nmod": 77,
+     "SYM|_|obl": 78,
+     "VERB|_|acl": 79,
+     "VERB|_|advcl": 80,
+     "VERB|_|ccomp": 81,
+     "VERB|_|compound": 82,
+     "VERB|_|csubj": 83,
+     "VERB|_|csubj:outer": 84,
+     "VERB|_|nmod": 85,
+     "VERB|_|obj": 86,
+     "VERB|_|obl": 87,
+     "VERB|_|root": 88,
+     "X|_|dep": 89,
+     "X|_|goeswith": 90,
+     "X|_|nmod": 91
+   },
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "tokenizer_class": "DebertaV2Tokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.1",
+   "type_vocab_size": 0,
+   "vocab_size": 96900
+ }
maker.py ADDED
@@ -0,0 +1,54 @@
+ #! /usr/bin/python3
+ src="ku-nlp/deberta-v3-base-japanese"
+ tgt="KoichiYasuoka/deberta-v3-base-japanese-ud-goeswith"
+ url="https://github.com/UniversalDependencies/UD_Japanese-GSDLUW"
+ import os
+ d=os.path.basename(url)
+ os.system("test -d "+d+" || git clone --depth=1 "+url)
+ os.system("for F in train dev test ; do cp "+d+"/*-$F.conllu $F.conllu ; done")
+ class UDgoeswithDataset(object):
+   def __init__(self,conllu,tokenizer):
+     self.ids,self.tags,label=[],[],set()
+     with open(conllu,"r",encoding="utf-8") as r:
+       cls,sep,msk=tokenizer.cls_token_id,tokenizer.sep_token_id,tokenizer.mask_token_id
+       dep,c="-|_|dep",[]
+       for s in r:
+         t=s.split("\t")
+         if len(t)==10 and t[0].isdecimal():
+           c.append(t)
+         elif c!=[] and s.strip()=="":
+           v=tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
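+           # give every extra subword its own "goeswith" row (UPOS X, head = the original word)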
+           for i in range(len(v)-1,-1,-1):
+             for j in range(1,len(v[i])):
+               c.insert(i+1,[c[i][0],"_","_","X","_","_",c[i][0],"goeswith","_","_"])
+           y=["0"]+[t[0] for t in c]
+           h=[i if t[6]=="0" else y.index(t[6]) for i,t in enumerate(c,1)]
+           p,v=[t[3]+"|"+t[5]+"|"+t[7] for t in c],sum(v,[])
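+           # one example labels each token with its own UPOS|FEATS|DEPREL; then, for every token,
+           # a masked copy whose labels mark exactly the tokens headed by that token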
+           self.ids.append([cls]+v+[sep])
+           self.tags.append([dep]+p+[dep])
+           label=set(sum([self.tags[-1],list(label)],[]))
+           for i,k in enumerate(v):
+             self.ids.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
+             self.tags.append([dep]+[t if h[j]==i+1 else dep for j,t in enumerate(p)]+[dep,dep])
+           c=[]
+     self.label2id={l:i for i,l in enumerate(sorted(label))}
+   def __call__(*args):
+     label=set(sum([list(t.label2id) for t in args],[]))
+     lid={l:i for i,l in enumerate(sorted(label))}
+     for t in args:
+       t.label2id=lid
+     return lid
+   __len__=lambda self:len(self.ids)
+   __getitem__=lambda self,i:{"input_ids":self.ids[i],"labels":[self.label2id[t] for t in self.tags[i]]}
+ from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
+ tkz=AutoTokenizer.from_pretrained(src,model_max_length=512)
+ trainDS=UDgoeswithDataset("train.conllu",tkz)
+ devDS=UDgoeswithDataset("dev.conllu",tkz)
+ testDS=UDgoeswithDataset("test.conllu",tkz)
+ lid=trainDS(devDS,testDS)
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()})
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=24,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
+ trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg),train_dataset=trainDS,eval_dataset=devDS)
+ trn.train()
+ trn.save_model(tgt)
+ tkz.save_pretrained(tgt)
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c4fb04d6c1af75d1c7f38389a0b972cb35256c9ed553936475635a93d4f7ad5
+ size 639830001
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fefde905766244f5e613a490d6e35236043d6483c4aae0eaac4b4a8fc365a88
+ size 1658609
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "added_tokens_decoder": {
+     "96867": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "96868": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "96869": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "96870": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "96871": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "[CLS]",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "keep_accents": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "split_by_punct": false,
+   "tokenizer_class": "DebertaV2Tokenizer",
+   "unk_token": "[UNK]"
+ }
ud.py ADDED
@@ -0,0 +1,62 @@
+ from transformers import TokenClassificationPipeline
+
+ class UniversalDependenciesPipeline(TokenClassificationPipeline):
+   def _forward(self,model_inputs):
+     import torch
+     v=model_inputs["input_ids"][0].tolist()
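+     # build one input per token, with that token masked and its id appended at the end,
+     # the same probing scheme as in the README example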
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)],device=self.device))
+     return {"logits":e.logits[:,1:-2,:],**model_inputs}
+   def postprocess(self,model_outputs,**kwargs):
+     import numpy
+     if "logits" not in model_outputs:
+       return "".join(self.postprocess(x,**kwargs) for x in model_outputs)
+     e=model_outputs["logits"].numpy()
+     r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+     e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+     g=self.model.config.label2id["X|_|goeswith"]
+     r=numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+     e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+     m,p=numpy.nanmax(e,axis=2),numpy.nanargmax(e,axis=2)
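+     # decode the head scores into a tree; if several tokens end up as their own head (root),
+     # keep only the best-scoring one and decode again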
+     h=self.chu_liu_edmonds(m)
+     z=[i for i,j in enumerate(h) if i==j]
+     if len(z)>1:
+       k,h=z[numpy.nanargmax(m[z,z])],numpy.nanmin(m)-numpy.nanmax(m)
+       m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+       h=self.chu_liu_edmonds(m)
+     v=[(s,e) for s,e in model_outputs["offset_mapping"][0].tolist() if s<e]
+     q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
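+     # unless aggregation_strategy is "none", merge "goeswith" subwords back into the word they belong to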
+     if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
+       for i,j in reversed(list(enumerate(q[1:],1))):
+         if j[-1]=="goeswith" and set([t[-1] for t in q[h[i]+1:i+1]])=={"goeswith"}:
+           h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+           v[i-1]=(v[i-1][0],v.pop(i)[1])
+           q.pop(i)
+     t=model_outputs["sentence"].replace("\n"," ")
+     u="# text = "+t+"\n"
+     for i,(s,e) in enumerate(v):
+       u+="\t".join([str(i+1),t[s:e],"_",q[i][0],"_","|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),q[i][-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+   def chu_liu_edmonds(self,matrix):
+     import numpy
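+     # simplified Chu-Liu-Edmonds: take the best head per node, contract any remaining cycle,
+     # and recurse on the contracted matrix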
+     h=numpy.nanargmax(matrix,axis=0)
+     x=[-1 if i==j else j for i,j in enumerate(h)]
+     for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+       y=[]
+       while x!=y:
+         y=list(x)
+         for i,j in enumerate(x):
+           x[i]=b(x,i,j)
+     if max(x)<0:
+       return h
+     y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+     z=matrix-numpy.nanmax(matrix,axis=0)
+     m=numpy.block([[z[x,:][:,x],numpy.nanmax(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.nanmax(z[y,:][:,x],axis=0),numpy.nanmax(z[y,y])]])
+     k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.nanargmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+     h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+     i=y[numpy.nanargmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+     h[i]=x[k[-1]] if k[-1]<len(x) else i
+     return h