import csv
import textwrap
from ast import literal_eval

from datasets import (BuilderConfig, Version, GeneratorBasedBuilder, DatasetInfo, Features, Value,
                      Sequence, ClassLabel, DownloadManager, SplitGenerator, Split)

_DESCRIPTION = """ |
|
The recognition and classification of proper nouns and names in plain text is of key importance in Natural Language |
|
Processing (NLP) as it has a beneficial effect on the performance of various types of applications, including |
|
Information Extraction, Machine Translation, Syntactic Parsing/Chunking, etc.""" |
|
_CITATION = """""" |

_FEATURES = Features(
    {
        "id": Value("int32"),
        "tokens": Sequence(Value("string")),
        # Named entity tags in the CoNLL-2003 BIO scheme (person, organization, location, miscellaneous).
        "ner": Sequence(
            ClassLabel(
                names=[
                    "O",
                    "B-PER",
                    "I-PER",
                    "B-ORG",
                    "I-ORG",
                    "B-LOC",
                    "I-LOC",
                    "B-MISC",
                    "I-MISC",
                ]
            )
        ),
        "document_id": Value("int32"),
        "sentence_id": Value("int32"),
    }
)
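
# A loaded example is expected to look roughly like the sketch below; the concrete values
# are hypothetical and only illustrate the schema declared in _FEATURES (the "ner" sequence
# is stored as ClassLabel integer ids that decode back to the BIO tag strings):
#
#     {
#         "id": 0,
#         "tokens": ["Szeged", "városában", "."],
#         "ner": [5, 0, 0],          # -> ["B-LOC", "O", "O"]
#         "document_id": 0,
#         "sentence_id": 0,
#     }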


class SzegedNERConfig(BuilderConfig):
    """BuilderConfig for SzegedNER."""

    def __init__(
        self,
        features,
        label_column,
        data_dir,
        citation,
        url,
        process_label=lambda x: x,
        **kwargs,
    ):
        super(SzegedNERConfig, self).__init__(version=Version("1.0.0", ""), **kwargs)
        self.features = features
        self.label_column = label_column
        self.data_dir = data_dir
        self.citation = citation
        self.url = url
        self.process_label = process_label


class SzegedNER(GeneratorBasedBuilder):
    """SzegedNER datasets."""

    BUILDER_CONFIGS = [
        SzegedNERConfig(
            name="business",
            description=textwrap.dedent(
                """\
                The Named Entity Corpus for Hungarian is a subcorpus of the Szeged Treebank, which contains full syntactic
                annotations done manually by linguist experts. A significant part of these texts has been annotated with
                Named Entity class labels in line with the annotation standards used on the CoNLL-2003 shared task."""
            ),
            features=_FEATURES,
            label_column="ner_tags",
            data_dir="data/business/",
            citation=textwrap.dedent(_CITATION),
            url="https://rgai.inf.u-szeged.hu/node/130",
        ),
        SzegedNERConfig(
            name="criminal",
            description=textwrap.dedent(
                """\
                The Hungarian National Corpus and its Heti Világgazdaság (HVG) subcorpus provided the basis for corpus text
                selection: articles related to the topic of financially liable offences were selected and annotated for the
                categories person, organization, location and miscellaneous. There are two annotated versions of the corpus.
                When preparing the tag-for-meaning annotation, our linguists took into consideration the context in which
                the Named Entity under investigation occurred, thus, it was not the primary sense of the Named Entity that
                determined the tag (e.g. Manchester=LOC) but its contextual reference (e.g. Manchester won the Premier
                League=ORG). As for tag-for-tag annotation, these cases were not differentiated: tags were always given on
                the basis of the primary sense."""
            ),
            features=_FEATURES,
            label_column="ner_tags",
            data_dir="data/criminal/",
            citation=textwrap.dedent(_CITATION),
            url="https://rgai.inf.u-szeged.hu/node/130",
        ),
    ]

    def _info(self):
        return DatasetInfo(
            description=self.config.description,
            features=self.config.features,
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        # self.base_path points to the location of this dataset script (local directory
        # or remote repository), so the per-config CSV files are resolved relative to it.
        url = f"{self.base_path}{self.config.data_dir}"

        path = dl_manager.download({key: f"{url}{key}.csv" for key in ["train", "validation", "test"]})
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"split_key": "train", "data_file": path["train"]},
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={"split_key": "validation", "data_file": path["validation"]},
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={"split_key": "test", "data_file": path["test"]},
            ),
        ]
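
    # Expected data layout implied by the URLs built above (inferred from the config
    # data_dir values and the split keys; shown for orientation only):
    #
    #     data/business/train.csv        data/criminal/train.csv
    #     data/business/validation.csv   data/criminal/validation.csv
    #     data/business/test.csv         data/criminal/test.csv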

    def _generate_examples(self, data_file, split_key, **kwargs):
        with open(data_file, encoding="utf8") as f:
            reader = csv.DictReader(f, delimiter=",", quoting=csv.QUOTE_MINIMAL)
            for n, row in enumerate(reader):
                # The "tokens" and "ner" columns are stored as string-serialised Python
                # lists, so they are parsed back with ast.literal_eval.
                labels = literal_eval(row["ner"])
                tokens = literal_eval(row["tokens"])
                if len(labels) != len(tokens):
                    raise ValueError("Number of tokens and labels does not match")
                yield n, {
                    "id": int(row["id"]),
                    "tokens": tokens,
                    "ner": labels,
                    "document_id": int(row["document_id"]),
                    "sentence_id": int(row["sentence_id"]),
                }
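

# Minimal local usage sketch, assuming this script is saved as "szeged_ner.py" with the CSV
# files available under data/<config>/ next to it; the file name and layout are assumptions
# for illustration, not something defined by the loader above.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("szeged_ner.py", name="business")  # hypothetical script file name
    sample = dataset["train"][0]
    tag_names = dataset["train"].features["ner"].feature.names
    print(sample["tokens"])
    print([tag_names[tag] for tag in sample["ner"]])  # decode ClassLabel ids back to BIO tags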