KoichiYasuoka committed
Commit cfe253c
1 Parent(s): 2b854af
UniversalDependenciesPipeline

- README.md +8 -0
- config.json +5 -0
- ud.py +85 -0
README.md CHANGED
@@ -84,3 +84,11 @@ print(nlp("Hai cái đầu thì tốt hơn một."))
 ```
 
 with [ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/) and [ViNLP](https://pypi.org/project/ViNLP/).
+Or without them:
+
+```
+from transformers import pipeline
+nlp=pipeline("universal-dependencies","KoichiYasuoka/phobert-base-vietnamese-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple")
+print(nlp("Hai cái đầu thì tốt hơn một."))
+```
+
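The pipeline returns its result as a CoNLL-U string: a `# text =` comment line followed by one tab-separated 10-column line per token (see `postprocess` in ud.py below). A minimal sketch of consuming that output, assuming the pipeline call added above:

```
from transformers import pipeline

nlp=pipeline("universal-dependencies","KoichiYasuoka/phobert-base-vietnamese-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple")
for line in nlp("Hai cái đầu thì tốt hơn một.").splitlines():
  if line and not line.startswith("#"):
    idx,form,lemma,upos,xpos,feats,head,deprel,deps,misc=line.split("\t")
    print(idx,form,upos,head,deprel)  # token id, word form, UPOS tag, head id, relation
```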
config.json CHANGED
@@ -5,6 +5,11 @@
   "attention_probs_dropout_prob": 0.1,
   "bos_token_id": 0,
   "classifier_dropout": null,
+  "custom_pipelines": {
+    "universal-dependencies": {
+      "impl": "ud.UniversalDependenciesPipeline"
+    }
+  },
   "eos_token_id": 2,
   "gradient_checkpointing": false,
   "hidden_act": "gelu",
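This `custom_pipelines` block is what lets `pipeline("universal-dependencies",...)` resolve the task name to the `UniversalDependenciesPipeline` class in ud.py when `trust_remote_code=True` is passed. As a quick check (a sketch, assuming extra config.json keys surface as attributes on the loaded config, which they normally do):

```
from transformers import AutoConfig

cfg=AutoConfig.from_pretrained("KoichiYasuoka/phobert-base-vietnamese-ud-goeswith")
print(cfg.custom_pipelines)  # {'universal-dependencies': {'impl': 'ud.UniversalDependenciesPipeline'}}
```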
ud.py ADDED
@@ -0,0 +1,85 @@
+from transformers import TokenClassificationPipeline
+
+class UniversalDependenciesPipeline(TokenClassificationPipeline):
+  def preprocess(self,sentence,offset_mapping=None):
+    from tokenizers.pre_tokenizers import Whitespace
+    # split on whitespace, then greedily re-join neighbouring words with "_"
+    # whenever the joined form is in the vocabulary (PhoBERT segments
+    # multi-syllable Vietnamese words with underscores)
+    t=[]
+    for k,(s,e) in Whitespace().pre_tokenize_str(sentence):
+      if t==[]:
+        t.append((k,(s,e)))
+      else:
+        j=t[-1][0]+"_"+k
+        if self.tokenizer.convert_tokens_to_ids(j)!=self.tokenizer.unk_token_id:
+          t[-1]=(j,(t[-1][1][0],e))
+        else:
+          t.append((k,(s,e)))
+    r=super().preprocess(sentence=" ".join(i for i,j in t))
+    # rebuild character offsets into the original sentence, splitting them
+    # wherever the tokenizer produced "@@"-suffixed BPE subwords
+    m=[(0,0)]+[j for i,j in t]+[(0,0)]
+    w=self.tokenizer.convert_ids_to_tokens(r["input_ids"][0])
+    if len(m)!=len(w):
+      for i,j in enumerate(w):
+        if j.endswith("@@"):
+          s,e=m[i]
+          m.insert(i+1,(s+len(j)-2,e))
+          m[i]=(s,s+len(j)-2)
+    r["offset_mapping"]=m
+    r["sentence"]=sentence
+    return r
+  def _forward(self,model_inputs):
+    import torch
+    v=model_inputs["input_ids"][0].tolist()
+    # one batch row per token: mask token i in place and append it after the
+    # sentence, so the label logits at position j score the arc between i and j
+    with torch.no_grad():
+      e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]))
+    return {"logits":e.logits[:,1:-2,:],**model_inputs}
+  def postprocess(self,model_outputs,**kwargs):
+    import numpy
+    e=model_outputs["logits"].numpy()
+    # NaN-mask the logits so that "root" labels are possible only on the
+    # diagonal (a token heading itself) and banned everywhere else
+    r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+    e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
+    # "goeswith" may only attach a token to its immediate predecessor,
+    # extending left-to-right chains
+    g=self.model.config.label2id["X|_|goeswith"]
+    r=numpy.tri(e.shape[0])
+    for i in range(e.shape[0]):
+      for j in range(i+2,e.shape[1]):
+        r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1
+    e[:,:,g]+=numpy.where(r==0,0,numpy.nan)
+    m,p=numpy.nanmax(e,axis=2),numpy.nanargmax(e,axis=2)
+    # decode the highest-scoring dependency tree
+    h=self.chu_liu_edmonds(m)
+    z=[i for i,j in enumerate(h) if i==j]
+    if len(z)>1:
+      # more than one token claims root: keep the best-scoring candidate,
+      # penalize the rest, and decode again
+      k,h=z[numpy.nanargmax(m[z,z])],numpy.nanmin(m)-numpy.nanmax(m)
+      m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+      h=self.chu_liu_edmonds(m)
+    v=[(s,e) for s,e in model_outputs["offset_mapping"] if s<e]
+    q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
+    # unless aggregation_strategy=="none", merge "goeswith" chains back into
+    # single words
+    g="aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none"
+    if g:
+      for i,j in reversed(list(enumerate(q[1:],1))):
+        if j[-1]=="goeswith" and set([t[-1] for t in q[h[i]+1:i+1]])=={"goeswith"}:
+          h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+          v[i-1]=(v[i-1][0],v.pop(i)[1])
+          q.pop(i)
+    # emit CoNLL-U: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC
+    t=model_outputs["sentence"].replace("\n"," ")
+    u="# text = "+t+"\n"
+    for i,(s,e) in enumerate(v):
+      u+="\t".join([str(i+1),t[s:e],t[s:e] if g else "_",q[i][0],"_","|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),q[i][-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
+    return u+"\n"
+  def chu_liu_edmonds(self,matrix):
+    # maximum spanning arborescence; matrix[i,j] scores head i for dependent j
+    import numpy
+    h=numpy.nanargmax(matrix,axis=0)
+    # iteratively prune nodes not on a cycle (they end up as -1); if nothing
+    # is left, the greedy per-column solution is already a tree
+    x=[-1 if i==j else j for i,j in enumerate(h)]
+    for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+      y=[]
+      while x!=y:
+        y=list(x)
+        for i,j in enumerate(x):
+          x[i]=b(x,i,j)
+      if max(x)<0:
+        return h
+    # contract the detected cycle (y) into a single node, recurse, then expand
+    y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+    z=matrix-numpy.nanmax(matrix,axis=0)
+    m=numpy.block([[z[x,:][:,x],numpy.nanmax(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.nanmax(z[y,:][:,x],axis=0),numpy.nanmax(z[y,y])]])
+    k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.nanargmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+    h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+    # break the cycle at the node that receives the external head
+    i=y[numpy.nanargmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+    h[i]=x[k[-1]] if k[-1]<len(x) else i
+    return h
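`chu_liu_edmonds` only reads its `matrix` argument, so it can be exercised in isolation. A minimal sketch on a hypothetical 4x4 score matrix, calling the method unbound (`None` stands in for the unused `self`); `matrix[i,j]` is the score of head `i` for dependent `j`, and the diagonal scores a token as root:

```
import numpy
from ud import UniversalDependenciesPipeline  # assumes ud.py is importable

# hypothetical scores: the greedy per-column maxima contain the cycle 1<->2
m=numpy.array([[9.,1.,2.,1.],
               [3.,0.,8.,1.],
               [1.,7.,0.,6.],
               [0.,2.,3.,0.]])
h=UniversalDependenciesPipeline.chu_liu_edmonds(None,m)
print(h)  # h[j] is the head of token j; h[j]==j marks the root
```

The contraction step resolves the 1<->2 cycle, and the decoded heads form a tree rooted at token 0.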