albertvillanova HF staff committed on
Commit
e06be61
1 Parent(s): 72a7fea

Delete loading script

Files changed (1)
  1. onestop_english.py +0 -135
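
Note: with the loading script deleted, the Hub presumably serves this dataset from data files (e.g. the auto-converted Parquet branch), so it should still load without executing any remote code. A minimal sketch, assuming the repository name and the single train split are unchanged:

    from datasets import load_dataset

    # No script is run; data files are fetched directly from the Hub.
    ds = load_dataset("onestop_english", split="train")
    print(ds.features)              # expected: text (string), label (ClassLabel: ele/int/adv)
    print(ds[0]["label"], ds[0]["text"][:80])
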
onestop_english.py DELETED
@@ -1,135 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """OneStopEnglish Corpus: Dataset of texts classified into reading levels/text complexities."""
-
-
- import os
-
- import datasets
- from datasets.tasks import TextClassification
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """\
- @inproceedings{vajjala-lucic-2018-onestopenglish,
-     title = {OneStopEnglish corpus: A new corpus for automatic readability assessment and text simplification},
-     author = {Sowmya Vajjala and Ivana Lučić},
-     booktitle = {Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications},
-     year = {2018}
- }
- """
-
- _DESCRIPTION = """\
- This dataset is a compilation of the OneStopEnglish corpus of texts written at three reading levels into one file.
- Text documents are classified into three reading levels - ele, int, adv (Elementary, Intermediate and Advanced).
- The dataset demonstrates its usefulness through two applications - automatic readability assessment and automatic text simplification.
- The corpus consists of 189 texts, each in three versions/reading levels (567 in total).
- """
-
- _HOMEPAGE = "https://github.com/nishkalavallabhi/OneStopEnglishCorpus"
-
- _LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International License"
-
- _URL = "https://github.com/purvimisal/OneStopCorpus-Compiled/raw/main/Texts-SeparatedByReadingLevel.zip"
-
-
- # TODO: The name of the dataset usually matches the script name with CamelCase instead of snake_case
- class OnestopEnglish(datasets.GeneratorBasedBuilder):
-     """OneStopEnglish Corpus: Dataset of texts classified into reading levels"""
-
-     VERSION = datasets.Version("1.1.0")
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["ele", "int", "adv"])}
-             ),
-             supervised_keys=("text", "label"),
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-             task_templates=[TextClassification(text_column="text", label_column="label")],
-         )
-
-     def _vocab_text_gen(self, split_key, data_dir):
-         for _, ex in self._generate_examples(split_key, data_dir):
-             yield ex["text"]
-
-     def _split_generators(self, dl_manager):
-         """Downloads OneStopEnglish corpus"""
-         extracted_folder_path = dl_manager.download_and_extract(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"split_key": "train", "data_dir": extracted_folder_path},
-             )
-         ]
-
-     def _get_examples_from_split(self, split_key, data_dir):
-         """Reads the downloaded and extracted files and combines the individual text files to one dataset."""
-
-         data_dir = os.path.join(data_dir, "Texts-SeparatedByReadingLevel")
-
-         ele_samples = []
-         dir_path = os.path.join(data_dir, "Ele-Txt")
-         files = os.listdir(dir_path)
-         for f in sorted(files):
-             try:
-                 with open(os.path.join(dir_path, f), encoding="utf-8-sig") as myfile:
-                     text = myfile.read().strip()
-                     ele_samples.append(text)
-             except Exception as e:
-                 logger.info("Error with %s: %s", os.path.join(dir_path, f), e)
-
-         int_samples = []
-         dir_path = os.path.join(data_dir, "Int-Txt")
-         files = os.listdir(dir_path)
-         for f in sorted(files):
-             try:
-                 with open(os.path.join(dir_path, f), encoding="utf-8-sig") as myfile:
-                     text = myfile.read().strip()
-                     int_samples.append(text)
-             except Exception as e:
-                 logger.info("Error with %s: %s", os.path.join(dir_path, f), e)
-
-         adv_samples = []
-         dir_path = os.path.join(data_dir, "Adv-Txt")
-         files = os.listdir(dir_path)
-         for f in sorted(files):
-             try:
-                 with open(os.path.join(dir_path, f), encoding="utf-8-sig") as myfile:
-                     text = myfile.read().strip()
-                     adv_samples.append(text)
-             except Exception as e:
-                 logger.info("Error with %s: %s", os.path.join(dir_path, f), e)
-
-         train_samples = ele_samples + int_samples + adv_samples
-         train_labels = (["ele"] * len(ele_samples)) + (["int"] * len(int_samples)) + (["adv"] * len(adv_samples))
-
-         if split_key == "train":
-             return (train_samples, train_labels)
-         else:
-             raise ValueError(f"Invalid split key {split_key}")
-
-     def _generate_examples(self, split_key, data_dir):
-         """Yields examples for a given split of dataset."""
-         split_text, split_labels = self._get_examples_from_split(split_key, data_dir)
-         for id_, (text, label) in enumerate(zip(split_text, split_labels)):
-             feature_dict = {"text": text, "label": label}
-             yield id_, feature_dict
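
For reference, the three copy-pasted per-level loops in _get_examples_from_split could be collapsed into a single helper. A minimal standalone sketch, assuming the same extracted layout (a Texts-SeparatedByReadingLevel folder with Ele-Txt, Int-Txt and Adv-Txt subfolders of UTF-8-with-BOM text files); read_level and the levels mapping are hypothetical names, not part of the original script:

    import os

    def read_level(data_dir, subdir):
        """Read every text file in one reading-level folder, sorted for determinism."""
        samples = []
        dir_path = os.path.join(data_dir, subdir)
        for name in sorted(os.listdir(dir_path)):
            path = os.path.join(dir_path, name)
            try:
                # utf-8-sig strips the byte-order mark the corpus files carry
                with open(path, encoding="utf-8-sig") as f:
                    samples.append(f.read().strip())
            except OSError as err:
                print(f"Error with {path}: {err}")
        return samples

    # Concatenate the levels and build parallel labels, as the original script does.
    levels = {"ele": "Ele-Txt", "int": "Int-Txt", "adv": "Adv-Txt"}
    texts, labels = [], []
    for label, subdir in levels.items():
        level_texts = read_level("Texts-SeparatedByReadingLevel", subdir)
        texts += level_texts
        labels += [label] * len(level_texts)

This preserves the original example ordering (ele, then int, then adv), since dicts keep insertion order in Python 3.7+.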