import os
import random
import re
import xml.etree.ElementTree as ET
from typing import List, Tuple

import datasets

_CITATION = """\
@article{kim2003genia,
    title = {GENIA corpus -- a semantically annotated corpus for bio-textmining},
    author = {Kim, Jin-Dong and Ohta, Tomoko and Tateisi, Yuka and Tsujii, Jun'ichi},
    journal = {Bioinformatics},
    volume = {19},
    number = {suppl_1},
    pages = {i180--i182},
    year = {2003}
}
"""

_DESCRIPTION = """\
GENIA Term corpus: MEDLINE abstracts annotated with biomedical term (entity)
classes from the GENIA ontology.
"""

_HOMEPAGE = "http://www.geniaproject.org/genia-corpus/term-corpus"

_LICENSE = ""

_URLS = "http://www.nactem.ac.uk/GENIA/current/GENIA-corpus/Term/GENIAcorpus3.02.tgz"


def _split_files(data_dir):
    """Split the single GENIA corpus XML file into train/dev/test XML files."""
    root = ET.parse(os.path.join(data_dir, "GENIA_term_3.02", "GENIAcorpus3.02.xml")).getroot()
    articles = root.findall(".//article")

    train_root = ET.Element("set")
    dev_root = ET.Element("set")
    test_root = ET.Element("set")

    # Fix the seed so the random split is reproducible across runs
    # (the seed value itself is an arbitrary choice).
    random.seed(0)
    random.shuffle(articles)

    # Roughly 2,000 articles in total: 1,600 train, 200 dev, the rest test.
    for a in articles[:1600]:
        train_root.append(a)
    for a in articles[1600:1800]:
        dev_root.append(a)
    for a in articles[1800:]:
        test_root.append(a)

    ET.ElementTree(train_root).write(os.path.join(data_dir, "train.xml"))
    ET.ElementTree(dev_root).write(os.path.join(data_dir, "dev.xml"))
    ET.ElementTree(test_root).write(os.path.join(data_dir, "test.xml"))


class GENIATermCorpus(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("0.9.0")

    # Punctuation to split off as separate tokens.
    pattern = re.compile(r"[,\.;:\[\]\(\)]")

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "folded_tokens": datasets.Sequence(datasets.Value("string")),
                "labels": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        _split_files(data_dir)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.xml"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.xml"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev.xml"),
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath: str, split):
        tree = ET.parse(filepath)
        articles = tree.findall(".//article")
        for idx, article in enumerate(articles):
            # The article id is parsed but not needed for the example keys.
            _article_id, data = self.parse_article(article)
            for sen_ix, (tokens, entities) in enumerate(data):
                yield f"{split}_{idx}_{sen_ix}", {
                    "tokens": tokens,
                    "folded_tokens": [t.lower() for t in tokens],
                    "labels": entities,
                }

    def parse_article(self, article: ET.Element):
        article_id = article.find("./articleinfo/bibliomisc").text
        sentences = article.findall(".//sentence")
        data = []
        for sentence in sentences:
            data.append(self.build_bio_tags(*self.flatten_tree(sentence)))
        return article_id, data
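
    # For reference, an annotated sentence in the corpus looks like (abridged):
    #     <sentence>We observed <cons sem="G#protein_molecule">IL-2</cons> ...</sentence>
    # flatten_tree() linearizes this markup into parallel (text segment, entity)
    # lists, and build_bio_tags() converts those into BIO-tagged tokens.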

    def build_bio_tags(self, text_segments: List[str], entities: List[str]) -> Tuple[List[str], List[str]]:
        tokens, tags = [], []
        for seg, entity in zip(text_segments, entities):
            # Split punctuation off into separate tokens before whitespace-splitting.
            seg = self.pattern.sub(r" \g<0> ", seg).strip()
            t = seg.split()
            if not t:
                # A segment that is empty after tokenization would otherwise
                # emit a stray B- tag with no matching token.
                continue
            tokens.extend(t)
            tags.extend(([f"B-{entity}"] + [f"I-{entity}"] * (len(t) - 1)) if entity != "O" else ["O"] * len(t))
        return tokens, tags
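
    # Worked example (hypothetical input): text_segments ["We observed ", "IL-2", "."]
    # with entities ["O", "protein_molecule", "O"] yields
    # tokens ["We", "observed", "IL-2", "."] and
    # tags   ["O", "O", "B-protein_molecule", "O"].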

    def flatten_tree(self, elem: ET.Element) -> Tuple[List[str], List[str]]:
        # Recursively collect (text segment, entity class) pairs: the text of a
        # <cons> element gets its "sem" class (sans the "G#" prefix); all other
        # text, including element tails, is tagged "O".
        text_segments, entities = [], []
        if elem.text:
            text_segments.append(elem.text)
            if elem.tag == "cons" and "sem" in elem.attrib:
                tag = elem.attrib["sem"].replace("G#", "")
            else:
                tag = "O"
            entities.append(tag)
        for child in elem:
            c_segments, c_entities = self.flatten_tree(child)
            text_segments.extend(c_segments)
            entities.extend(c_entities)
        if elem.tail and elem.tail != "\n":
            text_segments.append(elem.tail)
            entities.append("O")
        return text_segments, entities
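

# A minimal usage sketch (assuming this script is saved locally as
# `genia_term_corpus.py`; the filename is an assumption, not fixed by the script):
#
#     import datasets
#
#     ds = datasets.load_dataset("genia_term_corpus.py")
#     print(ds["train"][0]["tokens"][:10])
#     print(ds["train"][0]["labels"][:10])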