manirai91 committed on
Commit
27c0edf
·
1 Parent(s): 64b452e

Ebiquity V2 dataset added.

Browse files
Files changed (2) hide show
  1. dataset_infos.json +1 -0
  2. ebiquity-v2.py +105 -0
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"ebiquity-v2": {"description": "\nEbiquity V2 (non-stemmed) dataset for Nepali NER task. The dataset is tagged with BIO scheme.\n", "citation": "\n@INPROCEEDINGS{\n 8998477,\n author={O. M. {Singh} and A. {Padia} and A. {Joshi}},\n booktitle={2019 IEEE 5th International Conference on Collaboration and Internet Computing (CIC)},\n title={Named Entity Recognition for Nepali Language},\n year={2019},\n volume={},\n number={},\n pages={184-190},\n keywords={Named Entity Recognition;Nepali;Low-resource;BiLSTM;CNN;Grapheme},\n doi={10.1109/CIC48465.2019.00031},\n ISSN={null},\n month={Dec},}\n", "homepage": "https://arxiv.org/abs/1908.05828", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 9, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ebiquity_v2", "config_name": "ebiquity-v2", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2034448, "num_examples": 3289, "dataset_name": "ebiquity_v2"}}, "download_checksums": {"https://raw.githubusercontent.com/oya163/nepali-ner/master/data/ebiquity_v2/raw/total.bio": {"num_bytes": 1450701, "checksum": "05b16ee0c99706ddb278628f46516ac9d373787a39967484eb28eab4a12875c6"}}, "download_size": 1450701, "post_processing_size": null, "dataset_size": 2034448, "size_in_bytes": 3485149}}
ebiquity-v2.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+
3
+ import datasets
4
+
5
# BibTeX entry for the paper introducing the dataset
# (Singh, Padia & Joshi, "Named Entity Recognition for Nepali Language", IEEE CIC 2019).
_CITATION = """
@INPROCEEDINGS{
8998477,
author={O. M. {Singh} and A. {Padia} and A. {Joshi}},
booktitle={2019 IEEE 5th International Conference on Collaboration and Internet Computing (CIC)},
title={Named Entity Recognition for Nepali Language},
year={2019},
volume={},
number={},
pages={184-190},
keywords={Named Entity Recognition;Nepali;Low-resource;BiLSTM;CNN;Grapheme},
doi={10.1109/CIC48465.2019.00031},
ISSN={null},
month={Dec},}
"""

# Short human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """
Ebiquity V2 (non-stemmed) dataset for Nepali NER task. The dataset is tagged with BIO scheme.
"""

# Single raw BIO-tagged file containing the entire corpus (one train split).
_URL = "https://raw.githubusercontent.com/oya163/nepali-ner/master/data/ebiquity_v2/raw/total.bio"
26
+
27
+
28
class EbiquityV2Config(datasets.BuilderConfig):
    """BuilderConfig for Ebiquity V2.

    (Fixes a copy-paste error: the original docstring said "Conll2003",
    left over from the CoNLL-2003 loading script this file was based on.)
    """

    def __init__(self, **kwargs):
        """BuilderConfig for Ebiquity V2.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(EbiquityV2Config, self).__init__(**kwargs)
37
+
38
+
39
class EbiquityV2(datasets.GeneratorBasedBuilder):
    """Ebiquity V2: non-stemmed Nepali NER dataset, tagged with the BIO scheme."""

    BUILDER_CONFIGS = [
        EbiquityV2Config(name="ebiquity-v2", version=datasets.Version("1.0.0"), description="Ebiquity V2 dataset"),
    ]

    def _info(self):
        """Return dataset metadata: string id, token sequence, and 9-class BIO NER tags."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-MISC",
                                "I-MISC",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://arxiv.org/abs/1908.05828",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single raw BIO file; the entire corpus is one train split."""
        downloaded_file = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file})
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from a tab-separated BIO file.

        Sentences are separated by blank lines; each non-blank line is
        "token<TAB>tag" (one or more tabs accepted as the separator).
        """
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Raw string for the regex; tolerate multiple tabs between columns.
                    splits = re.split(r"\t+", line)
                    tokens.append(splits[0])
                    ner_tags.append(splits[1].rstrip())
            # BUG FIX: the original yielded unconditionally here, so a file
            # ending with a blank line produced a spurious empty example
            # (tokens=[], ner_tags=[]). Only emit a trailing sentence if the
            # file did NOT end with a sentence separator.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }