Update MANTRAGSC.py
Browse files — MANTRAGSC.py (+30 lines, −32 lines)
MANTRAGSC.py
CHANGED
@@ -118,26 +118,22 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
118 |
|
119 |
def _info(self):
|
120 |
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
|
128 |
features = datasets.Features(
|
129 |
{
|
130 |
"id": datasets.Value("string"),
|
131 |
-
# "document_id": datasets.Value("string"),
|
132 |
"tokens": [datasets.Value("string")],
|
133 |
"ner_tags": datasets.Sequence(
|
134 |
-
datasets.Value("string")
|
|
|
|
|
135 |
),
|
136 |
-
# "ner_tags": datasets.Sequence(
|
137 |
-
# datasets.features.ClassLabel(
|
138 |
-
# names = names,
|
139 |
-
# )
|
140 |
-
# ),
|
141 |
}
|
142 |
)
|
143 |
|
@@ -152,9 +148,8 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
152 |
def _split_generators(self, dl_manager):
|
153 |
|
154 |
language, dataset_type = self.config.name.split("_")
|
155 |
-
|
156 |
-
data_dir = dl_manager.download_and_extract(_URL)
|
157 |
|
|
|
158 |
data_dir = Path(data_dir) / "GSC-v1.1" / f"{_DATASET_TYPES[dataset_type]}_GSC_{language}_man.xml"
|
159 |
|
160 |
return [
|
@@ -162,8 +157,6 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
162 |
name=datasets.Split.TRAIN,
|
163 |
gen_kwargs={
|
164 |
"data_dir": data_dir,
|
165 |
-
"language": language,
|
166 |
-
"dataset_type": dataset_type,
|
167 |
"split": "train",
|
168 |
},
|
169 |
),
|
@@ -171,8 +164,6 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
171 |
name=datasets.Split.VALIDATION,
|
172 |
gen_kwargs={
|
173 |
"data_dir": data_dir,
|
174 |
-
"language": language,
|
175 |
-
"dataset_type": dataset_type,
|
176 |
"split": "validation",
|
177 |
},
|
178 |
),
|
@@ -180,15 +171,12 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
180 |
name=datasets.Split.TEST,
|
181 |
gen_kwargs={
|
182 |
"data_dir": data_dir,
|
183 |
-
"language": language,
|
184 |
-
"dataset_type": dataset_type,
|
185 |
"split": "test",
|
186 |
},
|
187 |
),
|
188 |
]
|
189 |
|
190 |
-
def _generate_examples(self, data_dir, language, dataset_type, split):
|
191 |
-
"""Yields examples as (key, example) tuples."""
|
192 |
|
193 |
with open(data_dir) as fd:
|
194 |
doc = xmltodict.parse(fd.read())
|
@@ -197,9 +185,6 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
197 |
|
198 |
for d in doc["Corpus"]["document"]:
|
199 |
|
200 |
-
# print(d)
|
201 |
-
# print()
|
202 |
-
|
203 |
if type(d["unit"]) != type(list()):
|
204 |
d["unit"] = [d["unit"]]
|
205 |
|
@@ -236,13 +221,14 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
236 |
"offset_end": offset_end,
|
237 |
})
|
238 |
|
239 |
-
ner_tags = ["O" for o in tokens]
|
240 |
|
241 |
for tag in tags:
|
242 |
|
|
|
|
|
243 |
for idx, token in enumerate(tokens):
|
244 |
|
245 |
-
# Range du tag
|
246 |
rtok = range(token["offset_start"], token["offset_end"]+1)
|
247 |
rtag = range(tag["offset_start"], tag["offset_end"]+1)
|
248 |
|
@@ -252,15 +238,27 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
252 |
# if ner_tags[idx] != "O" and ner_tags[idx] != tag['label']:
|
253 |
# print(f"{token} - currently: {ner_tags[idx]} - after: {tag['label']}")
|
254 |
|
255 |
-
ner_tags[idx] = tag["label"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
256 |
|
257 |
obj = {
|
258 |
"id": u["@id"],
|
259 |
"tokens": [t["token"] for t in tokens],
|
260 |
-
"ner_tags": ner_tags,
|
261 |
}
|
262 |
-
# print(obj)
|
263 |
-
# print("*"*50)
|
264 |
|
265 |
all_res.append(obj)
|
266 |
|
|
|
118 |
|
119 |
def _info(self):
|
120 |
|
121 |
+
if self.config.name.find("emea") != -1:
|
122 |
+
names = ['B-ANAT', 'I-ANAT', 'I-PHEN', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-DEVI', 'O', 'B-PHYS', 'I-DEVI', 'B-OBJC', 'I-DISO', 'B-PHEN', 'I-LIVB', 'B-DISO', 'B-LIVB', 'B-CHEM', 'I-PROC']
|
123 |
+
elif self.config.name.find("medline") != -1:
|
124 |
+
names = ['B-ANAT', 'I-ANAT', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-GEOG', 'B-DEVI', 'O', 'B-PHYS', 'I-LIVB', 'B-OBJC', 'I-DISO', 'I-DEVI', 'B-PHEN', 'B-DISO', 'B-LIVB', 'B-CHEM', 'I-PROC']
|
125 |
+
elif self.config.name.find("patents") != -1:
|
126 |
+
names = ['B-ANAT', 'I-ANAT', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-DEVI', 'O', 'I-LIVB', 'B-OBJC', 'I-DISO', 'B-PHEN', 'I-PROC', 'B-DISO', 'I-DEVI', 'B-LIVB', 'B-CHEM', 'B-PHYS']
|
127 |
|
128 |
features = datasets.Features(
|
129 |
{
|
130 |
"id": datasets.Value("string"),
|
|
|
131 |
"tokens": [datasets.Value("string")],
|
132 |
"ner_tags": datasets.Sequence(
|
133 |
+
datasets.features.ClassLabel(
|
134 |
+
names = names,
|
135 |
+
)
|
136 |
),
|
|
|
|
|
|
|
|
|
|
|
137 |
}
|
138 |
)
|
139 |
|
|
|
148 |
def _split_generators(self, dl_manager):
|
149 |
|
150 |
language, dataset_type = self.config.name.split("_")
|
|
|
|
|
151 |
|
152 |
+
data_dir = dl_manager.download_and_extract(_URL)
|
153 |
data_dir = Path(data_dir) / "GSC-v1.1" / f"{_DATASET_TYPES[dataset_type]}_GSC_{language}_man.xml"
|
154 |
|
155 |
return [
|
|
|
157 |
name=datasets.Split.TRAIN,
|
158 |
gen_kwargs={
|
159 |
"data_dir": data_dir,
|
|
|
|
|
160 |
"split": "train",
|
161 |
},
|
162 |
),
|
|
|
164 |
name=datasets.Split.VALIDATION,
|
165 |
gen_kwargs={
|
166 |
"data_dir": data_dir,
|
|
|
|
|
167 |
"split": "validation",
|
168 |
},
|
169 |
),
|
|
|
171 |
name=datasets.Split.TEST,
|
172 |
gen_kwargs={
|
173 |
"data_dir": data_dir,
|
|
|
|
|
174 |
"split": "test",
|
175 |
},
|
176 |
),
|
177 |
]
|
178 |
|
179 |
+
def _generate_examples(self, data_dir, split):
|
|
|
180 |
|
181 |
with open(data_dir) as fd:
|
182 |
doc = xmltodict.parse(fd.read())
|
|
|
185 |
|
186 |
for d in doc["Corpus"]["document"]:
|
187 |
|
|
|
|
|
|
|
188 |
if type(d["unit"]) != type(list()):
|
189 |
d["unit"] = [d["unit"]]
|
190 |
|
|
|
221 |
"offset_end": offset_end,
|
222 |
})
|
223 |
|
224 |
+
ner_tags = [["O", 0] for o in tokens]
|
225 |
|
226 |
for tag in tags:
|
227 |
|
228 |
+
cpt = 0
|
229 |
+
|
230 |
for idx, token in enumerate(tokens):
|
231 |
|
|
|
232 |
rtok = range(token["offset_start"], token["offset_end"]+1)
|
233 |
rtag = range(tag["offset_start"], tag["offset_end"]+1)
|
234 |
|
|
|
238 |
# if ner_tags[idx] != "O" and ner_tags[idx] != tag['label']:
|
239 |
# print(f"{token} - currently: {ner_tags[idx]} - after: {tag['label']}")
|
240 |
|
241 |
+
if ner_tags[idx][0] == "O":
|
242 |
+
cpt += 1
|
243 |
+
ner_tags[idx][0] = tag["label"]
|
244 |
+
ner_tags[idx][1] = cpt
|
245 |
+
|
246 |
+
for i in range(len(ner_tags)):
|
247 |
+
|
248 |
+
tag = ner_tags[i][0]
|
249 |
+
|
250 |
+
if tag == "O":
|
251 |
+
continue
|
252 |
+
elif tag != "O" and ner_tags[i][1] == 1:
|
253 |
+
ner_tags[i][0] = "B-" + tag
|
254 |
+
elif tag != "O" and ner_tags[i][1] != 1:
|
255 |
+
ner_tags[i][0] = "I-" + tag
|
256 |
|
257 |
obj = {
|
258 |
"id": u["@id"],
|
259 |
"tokens": [t["token"] for t in tokens],
|
260 |
+
"ner_tags": [n[0] for n in ner_tags],
|
261 |
}
|
|
|
|
|
262 |
|
263 |
all_res.append(obj)
|
264 |
|