Modalities: Text
Formats: parquet
Libraries: Datasets, pandas
Commit 8b8f617 · Parent: eee5db4
parquet-converter committed: Update parquet files
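With this commit the loading script is replaced by pre-converted parquet shards, so the splits can be read directly through the `datasets` library. A minimal sketch, assuming a hypothetical repository id (substitute the actual dataset id shown on the Hub) and the `page` feature defined by the removed script:

```python
from datasets import load_dataset

# Hypothetical repo id for illustration; use the real dataset id from the Hub page.
ds = load_dataset("<user>/wikitext_document_level", "wikitext-103-raw-v1")

# Each example is one full Wikipedia page (the "page" column produced by the old script).
print(ds["validation"][0]["page"][:200])
```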
wikitext-103-raw-v1/wikitext_document_level-test.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:903f6aa1c0dadaaef3b03fb8adc619771294fe009c2c590ff17055e3c09d746e
+size 716614
wikitext-103-raw-v1/wikitext_document_level-train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53da76c848298003fa314b0eea474900040d735bd6a324e9d2cf40fdcdfc836e
+size 289849160
wikitext-103-raw-v1/wikitext_document_level-train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c755676615fa51fe1f6e6cef444580da1e395b2a787922cda81efcb272c3968
+size 15008179
wikitext-103-raw-v1/wikitext_document_level-validation.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43bdba2535acb44a7621e157e044f7b7ba59aa0d2959c28b0c0aa59f88ea0938
+size 640823
wikitext-103-v1/wikitext_document_level-test.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1138f878a792ea9848a5d3276df2062d7c6fa2e0eb36d0d7780ab31ffc1f8391
+size 707710
wikitext-103-v1/wikitext_document_level-train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:863decbf1d6873e38e3c3e2a81bd149fcc6b348ac4e228234278e6c14b2e0499
+size 287542700
wikitext-103-v1/wikitext_document_level-train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a06f3b80167f6d0a922b6d6b18eebdbf89f95240965e1f83569bebf5d4b6e617
+size 14902486
wikitext-103-v1/wikitext_document_level-validation.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e599fd0ff15f52a18b93dfbb86bb513fa08ca961437e00e2c0f01f5d899921e5
+size 634765
wikitext-2-raw-v1/wikitext_document_level-test.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:903f6aa1c0dadaaef3b03fb8adc619771294fe009c2c590ff17055e3c09d746e
+size 716614
wikitext-2-raw-v1/wikitext_document_level-train.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aaacce42b7df31919073aeb8206c4cd9ea3d6387b3a07ad32d4cbd68a81737e2
+size 6179187
wikitext-2-raw-v1/wikitext_document_level-validation.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43bdba2535acb44a7621e157e044f7b7ba59aa0d2959c28b0c0aa59f88ea0938
+size 640823
wikitext-2-v1/wikitext_document_level-test.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:575995e5258f94b724d50863e7248f276ac54cb64900b34fb563a4bdfae91b8f
+size 663335
wikitext-2-v1/wikitext_document_level-train.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad03747185ee61e22edb7009feb86a6679aa65bfc2c3166a3378dc930dc9f563
+size 5885419
wikitext-2-v1/wikitext_document_level-validation.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7afc16f8636588b39dba6e096189aa9911a7ce26765dd58a07d05d53c2beb62c
+size 599120
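Each file added above is checked in as a Git LFS pointer (a `version` line, a `sha256` oid, and the payload size in bytes); the parquet data itself lives in LFS storage. A minimal sketch of inspecting one shard locally, assuming the payloads have already been fetched (for example with `git lfs pull`):

```python
import pandas as pd

# Path mirrors a file added in this commit; requires the LFS payload, not the pointer.
df = pd.read_parquet("wikitext-103-raw-v1/wikitext_document_level-test.parquet")

print(len(df))              # number of document-level examples in the test split
print(df.columns.tolist())  # expected to be a single "page" column of full articles
```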
wikitext_document_level.py DELETED
@@ -1,252 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- #
15
- # NOTE: This is a modified version of https://github.com/huggingface/datasets/blob/master/datasets/wikitext/wikitext.py
16
- # that returns Wiki pages instead of Wiki text line-by-line.
17
- """WikiText Dataset."""
18
-
19
-
20
- import os
21
-
22
- import datasets
23
-
24
-
25
- _CITATION = """\
26
- @misc{merity2016pointer,
27
- title={Pointer Sentinel Mixture Models},
28
- author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},
29
- year={2016},
30
- eprint={1609.07843},
31
- archivePrefix={arXiv},
32
- primaryClass={cs.CL}
33
- }
34
- """
35
-
36
- _DESCRIPTION = """\
37
- The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified
38
- Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike
39
- License.
40
- """
41
- _HOMEPAGE = "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/"
42
- _LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
43
- _DATA_URL = "https://s3.amazonaws.com/research.metamind.io/wikitext"
44
-
45
-
46
- class WikitextConfig(datasets.BuilderConfig):
47
- """BuilderConfig for GLUE."""
48
-
49
- def __init__(self, data_url, **kwargs):
50
- """BuilderConfig for Wikitext
51
- Args:
52
- data_url: `string`, url to the dataset (word or raw level)
53
- **kwargs: keyword arguments forwarded to super.
54
- """
55
- super(WikitextConfig, self).__init__(
56
- version=datasets.Version(
57
- "1.0.0",
58
- ),
59
- **kwargs,
60
- )
61
- self.data_url = data_url
62
-
63
-
64
- class Wikitext(datasets.GeneratorBasedBuilder):
65
- """TODO(wikitext_103): Short description of my dataset."""
66
-
67
- # TODO(wikitext_103): Set up version.
68
- VERSION = datasets.Version("0.1.0")
69
- BUILDER_CONFIGS = [
70
- WikitextConfig(
71
- name="wikitext-103-v1",
72
- data_url=_DATA_URL + "/" + "wikitext-103-v1.zip",
73
- description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
74
- ),
75
- WikitextConfig(
76
- name="wikitext-2-v1",
77
- data_url=_DATA_URL + "/" + "wikitext-2-v1.zip",
78
- description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
79
- ),
80
- WikitextConfig(
81
- name="wikitext-103-raw-v1",
82
- data_url=_DATA_URL + "/" + "wikitext-103-raw-v1.zip",
83
- description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
84
- "They should only be used for character level work or for creating newly derived datasets.",
85
- ),
86
- WikitextConfig(
87
- name="wikitext-2-raw-v1",
88
- data_url=_DATA_URL + "/" + "wikitext-2-raw-v1.zip",
89
- description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
90
- "They should only be used for character level work or for creating newly derived datasets.",
91
- ),
92
- ]
93
-
94
- def _info(self):
95
- # TODO(wikitext): Specifies the datasets.DatasetInfo object
96
- return datasets.DatasetInfo(
97
- # This is the description that will appear on the datasets page.
98
- description=_DESCRIPTION,
99
- # datasets.features.FeatureConnectors
100
- features=datasets.Features(
101
- {
102
- "page": datasets.Value("string")
103
- # These are the features of your dataset like images, labels ...
104
- }
105
- ),
106
- # If there's a common (input, target) tuple from the features,
107
- # specify them here. They'll be used if as_supervised=True in
108
- # builder.as_dataset.
109
- supervised_keys=None,
110
- homepage=_HOMEPAGE,
111
- license=_LICENSE,
112
- citation=_CITATION,
113
- )
114
-
115
- def _split_generators(self, dl_manager):
116
- """Returns SplitGenerators."""
117
- # TODO(wikitext): Downloads the data and defines the splits
118
- # dl_manager is a datasets.download.DownloadManager that can be used to
119
- # download and extract URLs
120
- if self.config.name == "wikitext-103-v1":
121
- data_file = dl_manager.download_and_extract(self.config.data_url)
122
- data_dir = os.path.join(data_file, "wikitext-103")
123
- return [
124
- datasets.SplitGenerator(
125
- name=datasets.Split.TEST,
126
- gen_kwargs={
127
- "data_file": os.path.join(data_dir, "wiki.test.tokens"),
128
- "split": "test",
129
- },
130
- ),
131
- datasets.SplitGenerator(
132
- name=datasets.Split.TRAIN,
133
- gen_kwargs={
134
- "data_file": os.path.join(data_dir, "wiki.train.tokens"),
135
- "split": "train",
136
- },
137
- ),
138
- datasets.SplitGenerator(
139
- name=datasets.Split.VALIDATION,
140
- gen_kwargs={
141
- "data_file": os.path.join(data_dir, "wiki.valid.tokens"),
142
- "split": "valid",
143
- },
144
- ),
145
- ]
146
- else:
147
- if self.config.name == "wikitext-103-raw-v1":
148
- data_file = dl_manager.download_and_extract(self.config.data_url)
149
- data_dir = os.path.join(data_file, "wikitext-103-raw")
150
- return [
151
- datasets.SplitGenerator(
152
- name=datasets.Split.TEST,
153
- gen_kwargs={
154
- "data_file": os.path.join(data_dir, "wiki.test.raw"),
155
- "split": "test",
156
- },
157
- ),
158
- datasets.SplitGenerator(
159
- name=datasets.Split.TRAIN,
160
- gen_kwargs={
161
- "data_file": os.path.join(data_dir, "wiki.train.raw"),
162
- "split": "train",
163
- },
164
- ),
165
- datasets.SplitGenerator(
166
- name=datasets.Split.VALIDATION,
167
- gen_kwargs={
168
- "data_file": os.path.join(data_dir, "wiki.valid.raw"),
169
- "split": "valid",
170
- },
171
- ),
172
- ]
173
- else:
174
- if self.config.name == "wikitext-2-raw-v1":
175
- data_file = dl_manager.download_and_extract(self.config.data_url)
176
- data_dir = os.path.join(data_file, "wikitext-2-raw")
177
- return [
178
- datasets.SplitGenerator(
179
- name=datasets.Split.TEST,
180
- gen_kwargs={
181
- "data_file": os.path.join(data_dir, "wiki.test.raw"),
182
- "split": "test",
183
- },
184
- ),
185
- datasets.SplitGenerator(
186
- name=datasets.Split.TRAIN,
187
- gen_kwargs={
188
- "data_file": os.path.join(data_dir, "wiki.train.raw"),
189
- "split": "train",
190
- },
191
- ),
192
- datasets.SplitGenerator(
193
- name=datasets.Split.VALIDATION,
194
- gen_kwargs={
195
- "data_file": os.path.join(data_dir, "wiki.valid.raw"),
196
- "split": "valid",
197
- },
198
- ),
199
- ]
200
- else:
201
- if self.config.name == "wikitext-2-v1":
202
- data_file = dl_manager.download_and_extract(
203
- self.config.data_url
204
- )
205
- data_dir = os.path.join(data_file, "wikitext-2")
206
- return [
207
- datasets.SplitGenerator(
208
- name=datasets.Split.TEST,
209
- gen_kwargs={
210
- "data_file": os.path.join(
211
- data_dir, "wiki.test.tokens"
212
- ),
213
- "split": "test",
214
- },
215
- ),
216
- datasets.SplitGenerator(
217
- name=datasets.Split.TRAIN,
218
- gen_kwargs={
219
- "data_file": os.path.join(
220
- data_dir, "wiki.train.tokens"
221
- ),
222
- "split": "train",
223
- },
224
- ),
225
- datasets.SplitGenerator(
226
- name=datasets.Split.VALIDATION,
227
- gen_kwargs={
228
- "data_file": os.path.join(
229
- data_dir, "wiki.valid.tokens"
230
- ),
231
- "split": "valid",
232
- },
233
- ),
234
- ]
235
-
236
- def _generate_examples(self, data_file, split):
237
- """Yields examples."""
238
- with open(data_file, encoding="utf-8") as f:
239
- key = 0
240
- ret = []
241
- data = f.read().split("\n")
242
- for line in data:
243
- rline = line.replace("= = =", "===").replace("= =", "==").strip()
244
- if rline.startswith("= ") and rline.strip().endswith(" ="):
245
- page = "\n".join(ret)
246
- if page.strip():
247
- yield key, {"page": page}
248
- key += 1
249
- ret = []
250
- ret.append(line)
251
- page = "\n".join(ret)
252
- yield key, {"page": page}