holylovenia committed on
Commit 0d88a52
1 Parent(s): 9e12d37

Upload indonli.py with huggingface_hub

Files changed (1)
  1. indonli.py +191 -0
indonli.py ADDED
@@ -0,0 +1,191 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ IndoNLI is the first human-elicited Natural Language Inference (NLI) dataset for Indonesian.
+ IndoNLI is annotated by both crowd workers and experts. The expert-annotated data is used exclusively as a test set.
+ It is designed to provide a challenging test-bed for Indonesian NLI by explicitly incorporating various linguistic
+ phenomena such as numerical reasoning, structural changes, idioms, or temporal and spatial reasoning.
+
+ The data is split across train, valid, test_lay, and test_expert.
+
+ A small subset of test_expert is used as a diagnostic tool. For more info, please visit https://github.com/ir-nlp-csui/indonli
+
+ The premises were collected from Indonesian Wikipedia and from other public Indonesian datasets: the Indonesian PUD and GSD treebanks provided by Universal Dependencies 2.5, and IndoSum.
+
+ The data was produced by humans.
+
+ """
+
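+ # Illustrative record shape, inferred from the fields read in _generate_examples
+ # below -- an assumption for documentation purposes, not a verbatim sample from
+ # the IndoNLI JSONL files:
+ # {"pair_id": 1, "premise_id": 1, "premise": "...", "hypothesis": "...",
+ #  "annotator_type": "crowd", "sentence_size": "...", "label": "e"}
+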
+ from pathlib import Path
+ from typing import List
+
+ import datasets
+ import jsonlines
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{mahendra-etal-2021-indonli,
+     title = "{I}ndo{NLI}: A Natural Language Inference Dataset for {I}ndonesian",
+     author = "Mahendra, Rahmad and Aji, Alham Fikri and Louvan, Samuel and Rahman, Fahrurrozi and Vania, Clara",
+     booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
+     month = nov,
+     year = "2021",
+     address = "Online and Punta Cana, Dominican Republic",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2021.emnlp-main.821",
+     pages = "10511--10527",
+ }
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+ _DATASETNAME = "indonli"
+
+ _DESCRIPTION = """\
+ This dataset is designed for the Natural Language Inference (NLI) NLP task. It is designed to provide a challenging test-bed
+ for Indonesian NLI by explicitly incorporating various linguistic phenomena such as numerical reasoning, structural
+ changes, idioms, or temporal and spatial reasoning.
+ """
+
+ _HOMEPAGE = "https://github.com/ir-nlp-csui/indonli"
+
+ _LICENSE = "Creative Commons Attribution Share-Alike 4.0 International"
+
+ # For publicly available datasets you will most likely end up passing these URLs to dl_manager in _split_generators.
+ # In most cases the URLs will be the same for the source and nusantara config.
+ # However, if you need to access different files for each config you can have multiple entries in this dict.
+ # This can be an arbitrarily nested dict/list of URLs (see below in the `_split_generators` method).
+ _URLS = {
+     _DATASETNAME: {
+         "train": "https://raw.githubusercontent.com/ir-nlp-csui/indonli/main/data/indonli/train.jsonl",
+         "valid": "https://raw.githubusercontent.com/ir-nlp-csui/indonli/main/data/indonli/val.jsonl",
+         "test": "https://raw.githubusercontent.com/ir-nlp-csui/indonli/main/data/indonli/test.jsonl",
+     }
+ }
+
+ _SUPPORTED_TASKS = [Tasks.TEXTUAL_ENTAILMENT]
+
+ _SOURCE_VERSION = "1.1.0"  # Mentioned in https://github.com/huggingface/datasets/blob/main/datasets/indonli/indonli.py
+
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class IndoNli(datasets.GeneratorBasedBuilder):
+     """IndoNLI, a human-elicited NLI dataset for Indonesian containing ~18k sentence pairs annotated by crowd workers."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="indonli_source",
+             version=SOURCE_VERSION,
+             description="indonli source schema",
+             schema="source",
+             subset_id="indonli",
+         ),
+         NusantaraConfig(
+             name="indonli_nusantara_pairs",
+             version=NUSANTARA_VERSION,
+             description="indonli Nusantara schema",
+             schema="nusantara_pairs",
+             subset_id="indonli",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "indonli_source"
+     labels = ["c", "e", "n"]  # "c" = contradiction, "e" = entailment, "n" = neutral
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "pair_id": datasets.Value("int32"),
+                     "premise_id": datasets.Value("int32"),
+                     "premise": datasets.Value("string"),
+                     "hypothesis": datasets.Value("string"),
+                     "annotator_type": datasets.Value("string"),
+                     "sentence_size": datasets.Value("string"),
+                     "label": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "nusantara_pairs":
+             # Shared Nusantara pairs schema: id, text_1, text_2, label
+             features = schemas.pairs_features(self.labels)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         urls = _URLS[_DATASETNAME]
+         train_data_path = Path(dl_manager.download_and_extract(urls["train"]))
+         valid_data_path = Path(dl_manager.download_and_extract(urls["valid"]))
+         test_data_path = Path(dl_manager.download_and_extract(urls["test"]))
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": train_data_path},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": valid_data_path},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": test_data_path},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path):
+
+         if self.config.schema == "source":
+             with jsonlines.open(filepath) as f:
+                 seen = set()  # To avoid duplicate IDs
+                 for example in f.iter():
+                     if example["pair_id"] not in seen:
+                         seen.add(example["pair_id"])
+                         example = {
+                             "pair_id": example["pair_id"],
+                             "premise_id": example["premise_id"],
+                             "premise": example["premise"],
+                             "hypothesis": example["hypothesis"],
+                             "annotator_type": example["annotator_type"],
+                             "sentence_size": example["sentence_size"],
+                             "label": example["label"],
+                         }
+                         yield example["pair_id"], example
+
+         elif self.config.schema == "nusantara_pairs":
+             with jsonlines.open(filepath) as f:
+                 seen = set()  # To avoid duplicate IDs
+                 for example in f.iter():
+                     if example["pair_id"] not in seen:
+                         seen.add(example["pair_id"])
+                         nu_eg = {
+                             "id": str(example["pair_id"]),
+                             "text_1": example["premise"],
+                             "text_2": example["hypothesis"],
+                             "label": example["label"],
+                         }
+                         yield example["pair_id"], nu_eg
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
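
For reference, a minimal usage sketch (illustrative, not part of the commit): it assumes `datasets`, `jsonlines`, and `nusacrowd` are installed and this script is saved locally as indonli.py; the config names follow BUILDER_CONFIGS above.

import datasets

# Load the Nusantara pairs schema; use name="indonli_source" for the raw fields.
ds = datasets.load_dataset("indonli.py", name="indonli_nusantara_pairs")

# Each example is a premise/hypothesis pair labeled "c", "e", or "n".
print(ds["train"][0])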