Commit acebe43

Parent(s): 9252910

Fix TOO MANY REQUESTS error (#1)

- Optimize code to use iter_files instead of globs (7f9e057ca2210c62fa743746854f5a3311704663)
- Fix style (0bcfe4e5c64d2f6f415281138c5febfc3cba4ce0)

Co-authored-by: Albert Villanova <albertvillanova@users.noreply.huggingface.co>

1 changed file: cantemist.py (+91, -84)

cantemist.py CHANGED
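The substantive change: the loader stops globbing the extracted directories (and drops the pandas dependency) in favor of the download manager's lazy `iter_files`, which is what resolves the TOO MANY REQUESTS failures when the dataset is streamed. A minimal sketch of the before/after pattern, assuming a `datasets.DownloadManager` named `dl_manager` and an already-extracted `data_dir` (directory names taken from the hunks below):

import os
from pathlib import Path

import datasets


def list_ner_files(dl_manager: datasets.DownloadManager, data_dir: str):
    # Before: glob materializes the directory listing up front; under
    # datasets' streaming mode this pattern fanned out into many remote
    # requests, which is the plausible source of the HTTP 429 errors.
    eager = list(Path(os.path.join(data_dir, "train-set/cantemist-ner")).glob("*txt"))

    # After: iter_files walks the same directory lazily, one file at a time,
    # and works for both local and streamed archives.
    lazy = dl_manager.iter_files(os.path.join(data_dir, "train-set", "cantemist-ner"))
    return eager, lazy

Because `iter_files` returns plain iterators rather than `Path` objects, `_generate_examples` can no longer glob for `*.txt` itself, which is why the hunks below also add an explicit extension filter.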
@@ -22,12 +22,14 @@ mapped by clinical experts to a controlled terminology. Every tumor morphology
 mention is linked to an eCIE-O code (the Spanish equivalent of ICD-O).
 """
 
-import os
+import csv
+import os.path
+from collections import defaultdict
+from itertools import chain
 from pathlib import Path
 from typing import Dict, List, Tuple
 
 import datasets
-import pandas as pd
 
 from .bigbiohub import kb_features
 from .bigbiohub import text_features
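Among the new imports, `itertools.chain` is what later lets the VALIDATION split present the two dev folders as a single stream of files. A small illustration of the idiom, with hypothetical lists standing in for the `iter_files` iterators:

from itertools import chain

# chain() concatenates iterables lazily: nothing is listed up front, which
# preserves the streaming-friendly behaviour of iter_files.
dev_set1 = ["dev-set1/doc_a.txt", "dev-set1/doc_a.ann"]  # hypothetical names
dev_set2 = ["dev-set2/doc_b.txt"]
for path in chain(dev_set1, dev_set2):
    print(path)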
@@ -37,7 +39,7 @@ from .bigbiohub import parse_brat_file
 from .bigbiohub import brat_parse_to_bigbio_kb
 
 
-_LANGUAGES = ['Spanish']
+_LANGUAGES = ["Spanish"]
 _PUBMED = False
 _LOCAL = False
 _CITATION = """\
@@ -79,10 +81,10 @@ For further information, please visit https://temu.bsc.es/cantemist or send an e
 
 _HOMEPAGE = "https://temu.bsc.es/cantemist/?p=4338"
 
-_LICENSE = 'Creative Commons Attribution 4.0 International'
+_LICENSE = "Creative Commons Attribution 4.0 International"
 
 _URLS = {
-    _DATASETNAME: 'https://zenodo.org/record/3978041/files/cantemist.zip?download=1',
+    _DATASETNAME: "https://zenodo.org/record/3978041/files/cantemist.zip?download=1",
 }
 
 _SUPPORTED_TASKS = [
@@ -229,120 +231,123 @@ class CantemistDataset(datasets.GeneratorBasedBuilder):
         call `this._generate_examples` with the keyword arguments in `gen_kwargs`.
         """
 
-        data_dir = dl_manager.download_and_extract(_URLS[_DATASETNAME])
+        data_dir = dl_manager.download_and_extract(_URLS[_DATASETNAME])
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "filepaths": {
-                        "task1": Path(
-                            os.path.join(data_dir, "train-set/cantemist-ner")
+                        "task1": dl_manager.iter_files(
+                            os.path.join(data_dir, "train-set", "cantemist-ner")
                         ),
-                        "task2": Path(
-                            os.path.join(data_dir, "train-set/cantemist-norm")
+                        "task2": dl_manager.iter_files(
+                            os.path.join(data_dir, "train-set", "cantemist-norm")
                         ),
-                        "task3": Path(
-                            os.path.join(data_dir, "train-set/cantemist-coding")
-                        ),
+                        "task3": [
+                            os.path.join(
+                                data_dir,
+                                "train-set",
+                                "cantemist-coding",
+                                "train-coding.tsv",
+                            )
+                        ],
                     },
-                    "split": "train",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "filepaths": {
-                        "task1": Path(os.path.join(data_dir, "test-set/cantemist-ner")),
-                        "task2": Path(
-                            os.path.join(data_dir, "test-set/cantemist-norm")
+                        "task1": dl_manager.iter_files(
+                            os.path.join(data_dir, "test-set", "cantemist-ner")
                         ),
-                        "task3": Path(
-                            os.path.join(data_dir, "test-set/cantemist-coding")
+                        "task2": dl_manager.iter_files(
+                            os.path.join(data_dir, "test-set", "cantemist-norm")
                         ),
+                        "task3": [
+                            os.path.join(
+                                data_dir,
+                                "test-set",
+                                "cantemist-coding",
+                                "test-coding.tsv",
+                            )
+                        ],
                     },
-                    "split": "test",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
                     "filepaths": {
-                        "task1_set1": Path(
-                            os.path.join(data_dir, "dev-set1/cantemist-ner")
-                        ),
-                        "task1_set2": Path(
-                            os.path.join(data_dir, "dev-set2/cantemist-ner")
-                        ),
-                        "task2_set1": Path(
-                            os.path.join(data_dir, "dev-set1/cantemist-norm")
+                        "task1": chain(
+                            dl_manager.iter_files(
+                                os.path.join(data_dir, "dev-set1", "cantemist-ner")
+                            ),
+                            dl_manager.iter_files(
+                                os.path.join(data_dir, "dev-set2", "cantemist-ner")
+                            ),
                         ),
-                        "task2_set2": Path(
-                            os.path.join(data_dir, "dev-set2/cantemist-norm")
-                        ),
-                        "task3_set1": Path(
-                            os.path.join(data_dir, "dev-set1/cantemist-coding")
+                        "task2": chain(
+                            dl_manager.iter_files(
+                                os.path.join(data_dir, "dev-set1", "cantemist-norm")
+                            ),
+                            dl_manager.iter_files(
+                                os.path.join(data_dir, "dev-set2", "cantemist-norm")
+                            ),
                         ),
-                        "task3_set2": Path(
-                            os.path.join(data_dir, "dev-set2/cantemist-coding")
-                        ),
+                        "task3": [
+                            os.path.join(
+                                data_dir,
+                                "dev-set1",
+                                "cantemist-coding",
+                                "dev1-coding.tsv",
+                            ),
+                            os.path.join(
+                                data_dir,
+                                "dev-set2",
+                                "cantemist-coding",
+                                "dev2-coding.tsv",
+                            ),
+                        ],
                     },
-                    "split": "dev",
                 },
             ),
         ]
 
-    def _generate_examples(self, filepaths, split) -> Tuple[int, Dict]:
+    def _generate_examples(self, filepaths) -> Tuple[int, Dict]:
         """
         This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
         """
 
-        if split != "dev":
-            txt_files_task1 = list(filepaths["task1"].glob("*txt"))
-            txt_files_task2 = list(filepaths["task2"].glob("*txt"))
-            tsv_file_task3 = Path(
-                os.path.join(filepaths["task3"], f"{split}-coding.tsv")
-            )
-            task3_df = pd.read_csv(tsv_file_task3, sep="\t", header=None)
-        else:
-            txt_files_task1, txt_files_task2, dfs = [], [], []
-            for i in range(1, 3):
-                txt_files_task1 += list(filepaths[f"task1_set{i}"].glob("*txt"))
-                txt_files_task2 += list(filepaths[f"task2_set{i}"].glob("*txt"))
-                tsv_file_task3 = Path(
-                    os.path.join(filepaths[f"task3_set{i}"], f"{split}{i}-coding.tsv")
-                )
-                df = pd.read_csv(tsv_file_task3, sep="\t", header=0)
-                dfs.append(df)
-            task3_df = pd.concat(dfs)
-
         if self.config.schema == "source" or self.config.schema == "bigbio_text":
-            task3_dict = {}
-            for idx, row in task3_df.iterrows():
-                file, code = row[0], row[1]
-                if file not in task3_dict:
-                    task3_dict[file] = [code]
-                else:
-                    task3_dict[file] += [code]
+            task3_dict = defaultdict(list)
+            for file_path in filepaths["task3"]:
+                with open(file_path, newline="", encoding="utf-8") as f:
+                    reader = csv.DictReader(f, delimiter="\t")
+                    for row in reader:
+                        task3_dict[row["file"]].append(row["code"])
 
         if self.config.schema == "source":
-            for guid, txt_file in enumerate(txt_files_task2):
-                example = parse_brat_file(
-                    txt_file, annotation_file_suffixes=[".ann"], parse_notes=True
-                )
-                if example["document_id"] in task3_dict:
-                    example["labels"] = task3_dict[example["document_id"]]
-                else:
-                    example["labels"] = (
-                        []
-                    )  # few cases where subtrack 3 has no codes for the current document
+            for guid, file_path in enumerate(filepaths["task2"]):
+                if os.path.splitext(file_path)[-1] != ".txt":
+                    continue
+                example = parse_brat_file(
+                    Path(file_path), annotation_file_suffixes=[".ann"], parse_notes=True
+                )
+                # consider few cases where subtrack 3 has no codes for the current document
+                example["labels"] = task3_dict.get(example["document_id"], [])
                 example["id"] = str(guid)
                 yield guid, example
 
         elif self.config.schema == "bigbio_kb":
-            for guid, txt_file in enumerate(txt_files_task2):
-                parsed_brat = parse_brat_file(txt_file, parse_notes=True)
+            for guid, file_path in enumerate(filepaths["task2"]):
+                if os.path.splitext(file_path)[-1] != ".txt":
+                    continue
+                parsed_brat = parse_brat_file(
+                    Path(file_path), annotation_file_suffixes=[".ann"], parse_notes=True
+                )
                 example = brat_parse_to_bigbio_kb(parsed_brat)
                 example["id"] = str(guid)
                 for i in range(0, len(example["entities"])):
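The `defaultdict(list)` plus `csv.DictReader` loop above replaces the old `pd.read_csv`/`iterrows` bookkeeping for the subtrack-3 TSVs. A standalone sketch of that grouping idiom (the `file` and `code` column names come from the hunk above; the sample rows are invented for illustration):

import csv
import io
from collections import defaultdict

# Two-column TSV as in the cantemist-coding files: one (file, code) pair per row.
sample = "file\tcode\ncc_onco1\t8041/3\ncc_onco1\t8000/6\ncc_onco2\t9731/3\n"

task3_dict = defaultdict(list)
reader = csv.DictReader(io.StringIO(sample), delimiter="\t")
for row in reader:
    # defaultdict removes the "if key missing" branch the old pandas loop needed.
    task3_dict[row["file"]].append(row["code"])

assert task3_dict["cc_onco1"] == ["8041/3", "8000/6"]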
@@ -354,14 +359,16 @@ class CantemistDataset(datasets.GeneratorBasedBuilder):
                 yield guid, example
 
         elif self.config.schema == "bigbio_text":
-            for guid, txt_file in enumerate(txt_files_task1):
-                parsed_brat = parse_brat_file(
-                    txt_file, annotation_file_suffixes=[".ann"], parse_notes=False
-                )
-                if parsed_brat["document_id"] in task3_dict:
-                    labels = task3_dict[parsed_brat["document_id"]]
-                else:
-                    labels = []
+            for guid, file_path in enumerate(filepaths["task1"]):
+                if os.path.splitext(file_path)[-1] != ".txt":
+                    continue
+                parsed_brat = parse_brat_file(
+                    Path(file_path),
+                    annotation_file_suffixes=[".ann"],
+                    parse_notes=False,
+                )
+                # consider few cases where subtrack 3 has no codes for the current document
+                labels = task3_dict.get(parsed_brat["document_id"], [])
                 example = {
                     "id": str(guid),
                     "document_id": parsed_brat["document_id"],
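One consequence of `iter_files`, visible in all three schema branches: it yields every file under a directory, the `.ann` annotation files included, hence the new `os.path.splitext` guard before `parse_brat_file`. In isolation:

import os.path

paths = ["cc_onco1.txt", "cc_onco1.ann", "cc_onco2.txt"]  # illustrative names
for file_path in paths:
    # Skip the .ann companions; parse_brat_file locates them from the .txt
    # path via annotation_file_suffixes=[".ann"].
    if os.path.splitext(file_path)[-1] != ".txt":
        continue
    print("would parse:", file_path)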
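Assuming this loader lives on the Hub under an id like `bigbio/cantemist` with the usual BigBio config naming (both assumptions here, not stated in the commit), the fix can be exercised end to end by streaming a split, which is exactly the code path that used to hit the rate limit:

from datasets import load_dataset

# Streaming avoids downloading the full Zenodo zip; with iter_files in place
# the loader no longer fans out into per-glob requests.
ds = load_dataset(
    "bigbio/cantemist", name="cantemist_bigbio_kb", split="train", streaming=True
)
print(next(iter(ds))["id"])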