EricPeter committed
Commit 23ca1ed • 1 Parent(s): b6a82a6

Upload ner.py

Files changed (1)
  1. ner.py +140 -0
ner.py ADDED
@@ -0,0 +1,140 @@
import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """
@InProceedings{huggingface:dataset,
title = {Luganda, Kanuri, and Hausa NER Dataset},
author = {multiple authors},
year = {2022}
}
"""

_DESCRIPTION = """
LugandaPII is a named-entity recognition dataset annotated with personally identifiable
entities such as PERSON, ORG, LOCATION, NORP, USERID, GOVT_ID, and DATE.
The dataset is available in Luganda (lug), Hausa (hau), Kanuri (knr), and Lum (lum),
each distributed across train, validation, and test splits.
"""

# Expected data directory structure:
data_directory = """
data
├── hau
│   ├── test.txt
│   ├── train.txt
│   └── val.txt
├── knr
│   ├── test.txt
│   ├── train.txt
│   └── val.txt
├── lug
│   ├── test.txt
│   ├── train.txt
│   └── val.txt
└── lum
    ├── test.txt
    ├── train.txt
    └── val.txt
"""

_URL = "https://github.com/EricPeter/pii/raw/main/data"
_TRAINING_FILE = "train.txt"
_VAL_FILE = "val.txt"
_TEST_FILE = "test.txt"


class LugPIIConfig(datasets.BuilderConfig):
    """BuilderConfig for the LugandaPII dataset."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class Masakhaner(datasets.GeneratorBasedBuilder):
    """Dataset builder for the LugandaPII NER dataset."""

    BUILDER_CONFIGS = [
        LugPIIConfig(name="lug", version=datasets.Version("1.0.0"), description="PII NER dataset for Luganda."),
        LugPIIConfig(name="hau", version=datasets.Version("1.0.0"), description="PII NER dataset for Hausa."),
        LugPIIConfig(name="knr", version=datasets.Version("1.0.0"), description="PII NER dataset for Kanuri."),
        LugPIIConfig(name="lum", version=datasets.Version("1.0.0"), description="PII NER dataset for Lum."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                # BILOU-style tag set. The list mirrors the tags that occur in
                # the data files, which is why both the LOC/LOCATION and
                # USERID/USER_ID spellings appear.
                "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=[
                    'B-DATE', 'B-GOVT_ID', 'B-LOC', 'B-LOCATION', 'B-NORP',
                    'B-ORG', 'B-PERSON', 'B-USERID', 'B-USER_ID',
                    'I-DATE', 'I-GOVT_ID', 'I-LOC', 'I-LOCATION', 'I-NORP',
                    'I-ORG', 'I-PERSON', 'I-USERID', 'I-USER_ID',
                    'L-DATE', 'L-GOVT_ID', 'L-LOC', 'L-LOCATION', 'L-NORP',
                    'L-ORG', 'L-PERSON', 'L-USERID', 'L-USER_ID',
                    'O',
                    'U-DATE', 'U-GOVT_ID', 'U-LOCATION', 'U-NORP',
                    'U-ORG', 'U-PERSON', 'U-USERID',
                ])),
            }),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        lang_code = self.config.name  # 'lug', 'hau', 'knr', or 'lum'
        urls_to_download = {
            "train": f"{_URL}/{lang_code}/{_TRAINING_FILE}",
            "val": f"{_URL}/{lang_code}/{_VAL_FILE}",
            "test": f"{_URL}/{lang_code}/{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line.strip() == "":
                    # Blank line marks the end of a sentence.
                    if tokens:
                        yield guid, {"id": str(guid), "tokens": tokens, "ner_tags": ner_tags}
                        guid += 1
                        tokens = []
                        ner_tags = []
                    continue
                # Each non-blank line is a whitespace-separated "token tag" pair,
                # e.g. "Kampala U-LOCATION".
                splits = line.strip().split()
                tokens.append(splits[0])
                ner_tags.append(splits[1])
            # Flush the last sentence if the file does not end with a blank line.
            if tokens:
                yield guid, {"id": str(guid), "tokens": tokens, "ner_tags": ner_tags}
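
For reference, a minimal usage sketch under two assumptions not stated in the commit: the script is saved locally as ner.py (any path works), and the data files follow the CoNLL-style layout the parser expects (one "token tag" pair per line, blank lines between sentences). The config name selects the language subset.

from datasets import load_dataset

# Hypothetical local path to this script; "lug" picks the Luganda subset.
dataset = load_dataset("ner.py", "lug")

example = dataset["train"][0]
print(example["tokens"])    # list of surface tokens
print(example["ner_tags"])  # integer-encoded BILOU tags

# The ClassLabel feature maps integer tags back to their string names.
names = dataset["train"].features["ner_tags"].feature.names
print([names[t] for t in example["ner_tags"]])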