gsarti committed on
Commit
1fc7f0a
1 Parent(s): 97b7204

Create clean_mc4_it.py

Files changed (1)
  1. clean_mc4_it.py +176 -0
clean_mc4_it.py ADDED
@@ -0,0 +1,176 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Cleaned Italian split of the mC4 corpus."""
+
+
+ import json
+ import gzip
+ import textwrap
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _CITATION = """
+ @article{JMLR:v21:20-074,
+   author  = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
+   title   = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
+   journal = {Journal of Machine Learning Research},
+   year    = {2020},
+   volume  = {21},
+   number  = {140},
+   pages   = {1-67},
+   url     = {http://jmlr.org/papers/v21/20-074.html}
+ }
+ """
+
+ _DESCRIPTION = """\
+ A thoroughly cleaned version of the Italian portion of the multilingual
+ colossal, cleaned version of Common Crawl's web crawl corpus (mC4) by AllenAI.
+
+ Based on Common Crawl dataset: "https://commoncrawl.org".
+
+ This is the processed version of Google's mC4 dataset by AllenAI, with further cleaning
+ detailed in the repository README file.
+ """
+
+ _HOMEPAGE = "https://github.com/allenai/allennlp/discussions/5056"
+
+ _LICENSE = "Open Data Commons Attribution License (ODC-By) v1.0"
+
+ _BASE_URL = "https://huggingface.co/datasets/gsarti/clean-mc4-it/resolve/main/clean-mc4-it/c4-it{split_suffix}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
+
+ _CONFIGS = {
+     "tiny": {"train": 100, "validation": 1},
+     "small": {"train": 250, "validation": 2},
+     "medium": {"train": 500, "validation": 4},
+     "large": {"train": 750, "validation": 6},
+     "full": {"train": 1024, "validation": 8}
+ }
+
+
+ class CleanMc4ItConfig(datasets.BuilderConfig):
+     """BuilderConfig for the Clean mC4 Italian."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for Clean mC4 Italian.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(**kwargs)
+
+
+ class Mc4(datasets.GeneratorBasedBuilder):
+     """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""
+
+     BUILDER_CONFIGS = [
+         CleanMc4ItConfig(
+             name="tiny",
+             version=datasets.Version("1.0.0"),
+             description=textwrap.dedent(
+                 f"""\
+                 A tiny cleaned version of the Italian portion of the multilingual C4 corpus.
+                 Estimated size of compressed files: 10GB
+                 """
+             )
+         ),
+         CleanMc4ItConfig(
+             name="small",
+             version=datasets.Version("1.0.0"),
+             description=textwrap.dedent(
+                 f"""\
+                 A small cleaned version of the Italian portion of the multilingual C4 corpus.
+                 Estimated size of compressed files: 25GB
+                 """
+             )
+         ),
+         CleanMc4ItConfig(
+             name="medium",
+             version=datasets.Version("1.0.0"),
+             description=textwrap.dedent(
+                 f"""\
+                 A medium cleaned version of the Italian portion of the multilingual C4 corpus.
+                 Estimated size of compressed files: 50GB
+                 """
+             )
+         ),
+         CleanMc4ItConfig(
+             name="large",
+             version=datasets.Version("1.0.0"),
+             description=textwrap.dedent(
+                 f"""\
+                 A large cleaned version of the Italian portion of the multilingual C4 corpus.
+                 Estimated size of compressed files: 75GB
+                 """
+             )
+         ),
+         CleanMc4ItConfig(
+             name="full",
+             version=datasets.Version("1.0.0"),
+             description=textwrap.dedent(
+                 f"""\
+                 The full cleaned version of the Italian portion of the multilingual C4 corpus.
+                 Estimated size of compressed files: 103GB
+                 """
+             )
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "timestamp": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_urls = {}
+         for split in ["train", "validation"]:
+             data_urls[split] = [
+                 _BASE_URL.format(
+                     split_suffix="-validation" if split == "validation" else "",
+                     index=index,
+                     n_shards=_CONFIGS[self.config.name][split],
+                 )
+                 for index in range(_CONFIGS[self.config.name][split])
+             ]
+         train_downloaded_files = dl_manager.download(data_urls["train"])
+         validation_downloaded_files = dl_manager.download(data_urls["validation"])
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
+             ),
+         ]
+
+     def _generate_examples(self, filepaths):
+         """This function returns the examples in the raw (text) form by iterating on all the files."""
+         id_ = 0
+         for filepath in filepaths:
+             logger.info(f"Generating examples from {filepath}")
+             with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
+                 for line in f:
+                     if line:
+                         example = json.loads(line)
+                         yield id_, example
+                         id_ += 1
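
For reference, a minimal usage sketch of the loading script added by this commit, assuming the dataset lives at gsarti/clean-mc4-it as implied by _BASE_URL; the "tiny" config and the streaming flag are illustrative choices, not part of the commit itself:

# Usage sketch (not part of the committed file). Loads the "tiny" config, whose
# 100 train / 1 validation shards are resolved from _BASE_URL in _split_generators
# and parsed line-by-line from gzipped JSON in _generate_examples.
from datasets import load_dataset

dataset = load_dataset("gsarti/clean-mc4-it", "tiny", split="train", streaming=True)
for example in dataset:
    # each record exposes the features declared in _info(): text, timestamp, url
    print(example["url"], example["timestamp"], example["text"][:80])
    break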