holylovenia committed commit 11d88f0 (1 parent: e45820c)

Upload ara_close.py with huggingface_hub

Files changed (1)
  1. ara_close.py +194 -0
ara_close.py ADDED
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""\
The dataset contribution of this study is a compilation of short fictional stories \
written in Bikol for readability assessment. The data was combined with other collected \
Philippine language corpora, such as Tagalog and Cebuano. The data from these languages \
are all distributed across the Philippine elementary system's first three grade \
levels (L1, L2, L3). We sourced this dataset from Let's Read Asia (LRA), Bloom Library, \
the Department of Education, and Adarna House.
"""

from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{imperial-kochmar-2023-automatic,
    title = "Automatic Readability Assessment for Closely Related Languages",
    author = "Imperial, Joseph Marvin  and
      Kochmar, Ekaterina",
    editor = "Rogers, Anna  and
      Boyd-Graber, Jordan  and
      Okazaki, Naoaki",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-acl.331",
    doi = "10.18653/v1/2023.findings-acl.331",
    pages = "5371--5386",
    abstract = "In recent years, the main focus of research on automatic readability assessment (ARA) \
has shifted towards using expensive deep learning-based methods with the primary goal of increasing models{'} accuracy. \
This, however, is rarely applicable for low-resource languages where traditional handcrafted features are still \
widely used due to the lack of existing NLP tools to extract deeper linguistic representations. In this work, \
we take a step back from the technical component and focus on how linguistic aspects such as mutual intelligibility \
or degree of language relatedness can improve ARA in a low-resource setting. We collect short stories written in three \
languages in the Philippines{---}Tagalog, Bikol, and Cebuano{---}to train readability assessment models and explore the \
interaction of data and features in various cross-lingual setups. Our results show that the inclusion of CrossNGO, \
a novel specialized feature exploiting n-gram overlap applied to languages with high mutual intelligibility, \
significantly improves the performance of ARA models compared to the use of off-the-shelf large multilingual \
language models alone. Consequently, when both linguistic representations are combined, we achieve state-of-the-art \
results for Tagalog and Cebuano, and baseline scores for ARA in Bikol.",
}
"""

_DATASETNAME = "ara_close"

_DESCRIPTION = """\
The dataset contribution of this study is a compilation of short fictional stories \
written in Bikol for readability assessment. The data was combined with other collected \
Philippine language corpora, such as Tagalog and Cebuano. The data from these languages \
are all distributed across the Philippine elementary system's first three grade \
levels (L1, L2, L3). We sourced this dataset from Let's Read Asia (LRA), Bloom Library, \
the Department of Education, and Adarna House. \
"""

_HOMEPAGE = "https://github.com/imperialite/ara-close-lang"

_LANGUAGES = ["bcl", "ceb"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)

_LICENSE = Licenses.CC_BY_4_0.value

_LOCAL = False

_URLS = {
    "bcl": "https://raw.githubusercontent.com/imperialite/ara-close-lang/main/data/bikol/bik_all_data.txt",
    # "tgl": "",  # the file for the tgl language was deleted
    "ceb": "https://raw.githubusercontent.com/imperialite/ara-close-lang/main/data/cebuano/ceb_all_data.txt",
}

_SUPPORTED_TASKS = [Tasks.READABILITY_ASSESSMENT]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class AraCloseDataset(datasets.GeneratorBasedBuilder):
    f"""{_DESCRIPTION}"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        )
        for lang in _LANGUAGES
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_seacrowd_text",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_text",
            subset_id=f"{_DATASETNAME}",
        )
        for lang in _LANGUAGES
    ]
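
    # The lists above expand to four configs: ara_close_bcl_source, ara_close_ceb_source,
    # ara_close_bcl_seacrowd_text, and ara_close_ceb_seacrowd_text. DEFAULT_CONFIG_NAME
    # below names an aggregate "ara_close_source" config that the fallback branches in
    # _split_generators and _generate_examples already handle but the lists above never
    # create. The append below is a sketch, assuming such an aggregate config was
    # intended, so that the default config name resolves:
    BUILDER_CONFIGS.append(
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} aggregate source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        )
    )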

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )

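        # Labels "1"-"3" are the three grade levels (L1, L2, L3) of the Philippine
        # elementary system mentioned in the dataset description.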
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(["1", "2", "3"])

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        lang = self.config.name.split("_")[2]
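        # The third "_"-separated token of the config name is the language code
        # ("bcl" in "ara_close_bcl_source"); for the aggregate configs this token is
        # not a language code, so the else branch downloads every language file.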
        if lang in _LANGUAGES:
            data_path = Path(dl_manager.download_and_extract(_URLS[lang]))
        else:
            data_path = [Path(dl_manager.download_and_extract(_URLS[lg])) for lg in _LANGUAGES]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_path,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        lang = self.config.name.split("_")[2]
        if lang in _LANGUAGES:
            # Single-language config: read one file, closing it promptly.
            with open(filepath, "r", encoding="utf-8") as f:
                file_content = f.readlines()
        else:
            # Aggregate config: filepath is a list of per-language paths.
            file_content = []
            for path in filepath:
                with open(path, "r", encoding="utf-8") as f:
                    file_content.extend(f.readlines())

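        # Each raw line appears to be comma-separated as "title,label,text"; the story
        # text can itself contain commas, hence the rejoin of split_data[2:] below.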
        if self.config.schema == "source":
            for idx, line in enumerate(file_content):
                split_data = line.strip().split(",")
                title = split_data[0]
                label = split_data[1]
                text = ",".join(split_data[2:])
                ex = {"title": title, "text": text, "label": label}
                yield idx, ex

        elif self.config.schema == "seacrowd_text":
            for idx, line in enumerate(file_content):
                split_data = line.strip().split(",")
                label = split_data[1]
                text = ",".join(split_data[2:])
                ex = {
                    "id": str(idx),  # the seacrowd text schema stores ids as strings
                    "text": text,
                    "label": label,
                }
                yield idx, ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
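
A minimal usage sketch, assuming the script is saved locally as ara_close.py, the seacrowd package is installed (the loader imports seacrowd.utils), and a datasets version recent enough to accept trust_remote_code for script-based datasets:

    from datasets import load_dataset

    # Load the Bikol subset with the source schema; the name must match one of the
    # configs generated in BUILDER_CONFIGS above.
    dset = load_dataset("ara_close.py", name="ara_close_bcl_source", trust_remote_code=True)
    print(dset["train"][0])  # -> {"title": ..., "text": ..., "label": ...}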