# ebiquity-v2-stemmed/ebiquity-v2-stemmed.py
import re

import datasets

_CITATION = """
@INPROCEEDINGS{
8998477,
author={O. M. {Singh} and A. {Padia} and A. {Joshi}},
booktitle={2019 IEEE 5th International Conference on Collaboration and Internet Computing (CIC)},
title={Named Entity Recognition for Nepali Language},
year={2019},
volume={},
number={},
pages={184-190},
keywords={Named Entity Recognition;Nepali;Low-resource;BiLSTM;CNN;Grapheme},
doi={10.1109/CIC48465.2019.00031},
ISSN={null},
month={Dec},}
"""
_DESCRIPTION = """
Ebiquity V2 (stemmed) dataset for Nepali NER task. The dataset is tagged with BIO scheme.
"""
_URL = "https://raw.githubusercontent.com/mani-rai/nepali-ner/master/data/ebiquity_v2/stemmed/total.bio"


class EbiquityV2StemmedConfig(datasets.BuilderConfig):
    """BuilderConfig for Ebiquity V2 Stemmed."""

    def __init__(self, **kwargs):
        """BuilderConfig for Ebiquity V2 Stemmed.

        Args:
            **kwargs: Keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class EbiquityV2Stemmed(datasets.GeneratorBasedBuilder):
    """Ebiquity V2 Stemmed dataset builder."""

    BUILDER_CONFIGS = [
        EbiquityV2StemmedConfig(
            name="ebiquity-v2-stemmed",
            version=datasets.Version("1.0.0"),
            description="Ebiquity v2 stemmed dataset",
        ),
    ]
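
    # With a single BuilderConfig defined, `datasets.load_dataset` selects it
    # automatically, so no DEFAULT_CONFIG_NAME needs to be set here.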

    def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
"B-MISC",
"I-MISC",
]
)
),
}
),
supervised_keys=None,
homepage="https://arxiv.org/abs/1908.05828",
citation=_CITATION,
)
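
    # Note: `ner_tags` are stored as ClassLabel integers. After loading, the
    # string labels can be recovered via the standard ClassLabel API, e.g.
    #   ds.features["ner_tags"].feature.int2str(ds[0]["ner_tags"])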

    def _split_generators(self, dl_manager):
        # The source repository publishes the corpus as a single BIO file,
        # so only a train split is exposed.
        downloaded_file = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
        ]

    def _generate_examples(self, filepath):
        # Each non-blank line holds a "token<TAB>tag" pair; blank lines mark
        # sentence boundaries.
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line.strip() == "":
                    # Sentence boundary: emit the example accumulated so far.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    splits = re.split(r"\t+", line)
                    tokens.append(splits[0])
                    ner_tags.append(splits[1].rstrip())
            # Emit the final sentence only if the file does not end with a
            # blank line; the original unconditional yield could produce an
            # empty trailing example.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
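

if __name__ == "__main__":
    # Minimal smoke test: a sketch assuming network access to _URL and that
    # this script is run directly. Depending on the installed `datasets`
    # version, trust_remote_code=True may be required, and very recent
    # versions may not support script-based loading at all.
    ds = datasets.load_dataset(__file__, split="train")
    print(ds)
    print(ds[0])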