Update ESSAI.py
ESSAI.py (CHANGED)
@@ -39,6 +39,39 @@ _HOMEPAGE = "https://clementdalloux.fr/?page_id=28"
 
 _LICENSE = 'Data User Agreement'
 
+class StringIndex:
+
+    def __init__(self, vocab):
+
+        self.vocab_struct = {}
+
+        print("Start building the index!")
+        for t in vocab:
+
+            if len(t) == 0:
+                continue
+
+            # Index terms by their first letter and length
+            key = (t[0], len(t))
+
+            if (key in self.vocab_struct) == False:
+                self.vocab_struct[key] = []
+
+            self.vocab_struct[key].append(t)
+
+        print("Finished building the index!")
+
+    def find(self, t):
+
+        key = (t[0], len(t))
+
+        if (key in self.vocab_struct) == False:
+            return "is_oov"
+
+        return "is_not_oov" if t in self.vocab_struct[key] else "is_oov"
+
+_VOCAB = StringIndex(vocab=open("./vocabulary_nachos_lowercased.txt","r").read().split("\n"))
+
 class ESSAI(datasets.GeneratorBasedBuilder):
 
     DEFAULT_CONFIG_NAME = "pos_spec"
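The new StringIndex buckets every vocabulary term under the key (first character, length), so find only scans the one bucket that could contain the query instead of the whole vocabulary. A minimal sketch of the lookup behaviour, using a toy vocabulary in place of the real vocabulary_nachos_lowercased.txt file:

    # Toy vocabulary instead of the NACHOS word list loaded by the script.
    index = StringIndex(vocab=["patient", "douleur", "traitement"])

    print(index.find("douleur"))   # "is_not_oov": bucket ('d', 7) exists and contains the token
    print(index.find("douleurs"))  # "is_oov": no bucket for ('d', 8)
    print(index.find("zzz"))       # "is_oov": no bucket for ('z', 3)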
@@ -65,6 +98,11 @@ class ESSAI(datasets.GeneratorBasedBuilder):
                 "pos_tags": [datasets.features.ClassLabel(
                     names = ['B-INT', 'B-PRO:POS', 'B-PRP', 'B-SENT', 'B-PRO', 'B-ABR', 'B-VER:pres', 'B-KON', 'B-SYM', 'B-DET:POS', 'B-VER:', 'B-PRO:IND', 'B-NAM', 'B-ADV', 'B-PRO:DEM', 'B-NN', 'B-PRO:PER', 'B-VER:pper', 'B-VER:ppre', 'B-PUN', 'B-VER:simp', 'B-PREF', 'B-NUM', 'B-VER:futu', 'B-NOM', 'B-VER:impf', 'B-VER:subp', 'B-VER:infi', 'B-DET:ART', 'B-PUN:cit', 'B-ADJ', 'B-PRP:det', 'B-PRO:REL', 'B-VER:cond', 'B-VER:subi'],
                 )],
+                "is_oov": datasets.Sequence(
+                    datasets.features.ClassLabel(
+                        names=['is_not_oov', 'is_oov'],
+                    ),
+                ),
             }
         )
 
@@ -97,6 +135,11 @@ class ESSAI(datasets.GeneratorBasedBuilder):
                 "ner_tags": [datasets.features.ClassLabel(
                     names = names,
                 )],
+                "is_oov": datasets.Sequence(
+                    datasets.features.ClassLabel(
+                        names=['is_not_oov', 'is_oov'],
+                    ),
+                ),
             }
         )
 
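Both configurations declare is_oov as a Sequence of a two-way ClassLabel, so the per-token strings yielded by the generators are stored as integer class ids. A small sketch of the label mapping this feature provides; it mirrors the feature added above but is independent of the loader itself:

    import datasets

    # Mirrors the "is_oov" feature added to both configurations.
    is_oov_feature = datasets.Sequence(
        datasets.features.ClassLabel(names=["is_not_oov", "is_oov"]),
    )

    label = is_oov_feature.feature      # the inner ClassLabel
    print(label.str2int("is_oov"))      # 1
    print(label.int2str(0))             # "is_not_oov"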
@@ -226,6 +269,7 @@ class ESSAI(datasets.GeneratorBasedBuilder):
                 "tokens": tokens,
                 "lemmas": text_lemmas,
                 "pos_tags": pos_tags,
+                "is_oov": [_VOCAB.find(tt.lower()) for tt in tokens],
             })
             unique_id_doc.append(doc_id)
 
@@ -285,6 +329,7 @@ class ESSAI(datasets.GeneratorBasedBuilder):
                 "tokens": tokens,
                 "lemmas": text_lemmas,
                 "ner_tags": ner_tags,
+                "is_oov": [_VOCAB.find(tt.lower()) for tt in tokens],
             })
 
             key += 1
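In both generators the per-token labels come from the same list comprehension: each token is lowercased and looked up in the module-level _VOCAB index. A short sketch of the resulting token/label alignment, with a hypothetical sentence and a toy index standing in for the NACHOS vocabulary:

    # Toy index and tokens; the loader itself uses _VOCAB built from
    # vocabulary_nachos_lowercased.txt.
    vocab_index = StringIndex(vocab=["le", "patient", "présente", "une", "douleur"])
    tokens = ["Le", "patient", "présente", "une", "dyspnée", "."]

    is_oov = [vocab_index.find(tt.lower()) for tt in tokens]
    print(list(zip(tokens, is_oov)))
    # [('Le', 'is_not_oov'), ('patient', 'is_not_oov'), ('présente', 'is_not_oov'),
    #  ('une', 'is_not_oov'), ('dyspnée', 'is_oov'), ('.', 'is_oov')]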