|
import datasets |
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
_CITATION = """ |
|
@InProceedings{huggingface:dataset, |
|
title = {Luganda, Kanuri, and Hausa NER Dataset}, |
|
author = {multiple authors}, |
|
year = {2022} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """ |
|
LugandaPII is a dataset that includes named entities such as PERSON, ORG, LOCATION, NORP, USERID, and DATE. |
|
The dataset is available in Lum, Kanuri, and Hausa languages, distributed across train, validation, and test splits. |
|
""" |
|
|
|
# Base location of the raw data on GitHub. Per-language files are fetched from
# "<_URL>/<lang_code>/<split file>" (see _split_generators).
_URL = "https://github.com/EricPeter/pii/raw/main/data"

# File names of the three splits inside each language directory.
_TRAINING_FILE = "train.txt"

_VAL_FILE = "val.txt"

_TEST_FILE = "test.txt"
|
|
|
class LugPIIConfig(datasets.BuilderConfig):
    """BuilderConfig for the LugandaPII NER dataset.

    The inherited ``datasets.BuilderConfig.__init__`` already accepts the
    ``name``/``version``/``description`` keywords used by BUILDER_CONFIGS,
    so the previous pass-through ``__init__`` override was redundant and
    has been removed; behavior is unchanged.
    """
|
|
|
class Masakhaner(datasets.GeneratorBasedBuilder):
    """Dataset builder for the LugandaPII NER data, one config per language."""

    # One config per language code; the files for a config are fetched from
    # the matching subdirectory of _URL.
    BUILDER_CONFIGS = [
        LugPIIConfig(
            name=code,
            version=datasets.Version("1.0.0"),
            description=f"PII NER dataset for {language}.",
        )
        for code, language in (
            ("lug", "Luganda"),
            ("hau", "Hausa"),
            ("knr", "Kanuri"),
            ("lum", "Lum"),
        )
    ]

    # BILOU tag inventory, in the exact order that fixes the ClassLabel ids.
    # NOTE(review): the U- group has no U-LOC / U-USER_ID entries, and both
    # LOC/LOCATION and USERID/USER_ID spellings coexist — presumably artifacts
    # of the source annotations; verify against the data files.
    _NER_TAG_NAMES = [
        "B-DATE", "B-GOVT_ID", "B-LOC", "B-LOCATION", "B-NORP", "B-ORG",
        "B-PERSON", "B-USERID", "B-USER_ID",
        "I-DATE", "I-GOVT_ID", "I-LOC", "I-LOCATION", "I-NORP", "I-ORG",
        "I-PERSON", "I-USERID", "I-USER_ID",
        "L-DATE", "L-GOVT_ID", "L-LOC", "L-LOCATION", "L-NORP", "L-ORG",
        "L-PERSON", "L-USERID", "L-USER_ID",
        "O",
        "U-DATE", "U-GOVT_ID", "U-LOCATION", "U-NORP", "U-ORG", "U-PERSON",
        "U-USERID",
    ]

    def _info(self):
        """Return the DatasetInfo: features schema, description, citation."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(names=self._NER_TAG_NAMES)
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-language split files and map them to the three splits."""
        lang = self.config.name
        remote = {
            "train": f"{_URL}/{lang}/{_TRAINING_FILE}",
            "val": f"{_URL}/{lang}/{_VAL_FILE}",
            "test": f"{_URL}/{lang}/{_TEST_FILE}",
        }
        local = dl_manager.download_and_extract(remote)

        split_to_key = (
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "val"),
            (datasets.Split.TEST, "test"),
        )
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"filepath": local[key]})
            for split, key in split_to_key
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs from a CoNLL-style file.

        Each non-blank line holds whitespace-separated "token tag" columns;
        a blank line terminates the current sentence.
        """
        logger.info("Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as handle:
            guid = 0
            tokens, ner_tags = [], []
            for raw_line in handle:
                stripped = raw_line.strip()
                if not stripped:
                    # Sentence boundary: emit the buffered example, if any.
                    if tokens:
                        yield guid, {"id": str(guid), "tokens": tokens, "ner_tags": ner_tags}
                        guid += 1
                        tokens, ner_tags = [], []
                    continue
                # NOTE(review): a line with fewer than two columns raises
                # IndexError here, as in the original — confirm the data is clean.
                columns = stripped.split()
                tokens.append(columns[0])
                ner_tags.append(columns[1])
            # Flush a trailing sentence that is not followed by a blank line.
            if tokens:
                yield guid, {"id": str(guid), "tokens": tokens, "ner_tags": ner_tags}