Commit 873619f by stas
Parent: 16b6c9f

build script

Files changed (2):
  1. openwebtext-10k.py +89 -0
  2. process.txt +57 -0
openwebtext-10k.py ADDED
@@ -0,0 +1,89 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """The Open WebText Corpus"""
+
+
+ import os
+ import re
+ from itertools import chain
+
+ import datasets
+
+
+ _CITATION = """\
+ @misc{Gokaslan2019OpenWeb,
+   title={OpenWebText Corpus},
+   author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
+   howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
+   year={2019}
+ }
+ """
+
+ _DESCRIPTION = """\
+ An open-source replication of the WebText dataset from OpenAI.
+
+ This is a small subset representing the first 10K records from the original dataset, created for testing.
+
+ The full 8M-record dataset is at https://huggingface.co/datasets/openwebtext
+ """
+
+ _URL = "https://cdn-datasets.huggingface.co/nlp/datasets/openwebtext/openwebtext-10k.tar.xz"
+
+ class Openwebtext(datasets.GeneratorBasedBuilder):
+     """The Open WebText dataset."""
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="plain_text",
+             description="Plain text",
+             version=datasets.Version("1.0.0"),
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({"text": datasets.Value("string")}),
+             homepage="https://skylion007.github.io/OpenWebTextCorpus/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         dl_dir = dl_manager.download_and_extract(_URL)
+         owt_dir = os.path.join(dl_dir, "openwebtext-10k")
+         subset_xzs = [
+             os.path.join(owt_dir, file_name)
+             for file_name in sorted(os.listdir(owt_dir))
+             if file_name.endswith("xz")  # filter out ...xz.lock
+         ]
+         ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
+         nested_txt_files = [
+             [
+                 os.path.join(ex_dir, txt_file_name)
+                 for txt_file_name in sorted(os.listdir(ex_dir))
+                 if txt_file_name.endswith("txt")
+             ]
+             for ex_dir in ex_dirs
+         ]
+         txt_files = chain(*nested_txt_files)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
+         ]
+
+     def _generate_examples(self, txt_files):
+         """Yields examples."""
+         for idx, filepath in enumerate(txt_files):
+             with open(filepath, encoding="utf-8") as f:
+                 yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
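
Once the script is on the hub, the subset loads like any other dataset. A minimal usage sketch (assumes the datasets library is installed; newer datasets versions may also require trust_remote_code=True for script-based datasets like this one):

# load the 10K subset and peek at a record
from datasets import load_dataset

ds = load_dataset("stas/openwebtext-10k", split="train")
print(ds.num_rows)          # expect 10000, per the description above
print(ds[0]["text"][:200])  # first 200 characters of the first record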
process.txt ADDED
@@ -0,0 +1,57 @@
+
+ # this is a small derivative of the 8M-record openwebtext dataset, for testing
+
+ # how this build script and dataset_infos.json were generated:
+
+ mkdir openwebtext-10k
+ cd openwebtext-10k
+
+ # data
+ wget https://zenodo.org/record/3834942/files/openwebtext.tar.xz
+ tar xf openwebtext.tar.xz
+ cd openwebtext
+ # zero-pad the archive numbers to 3 digits so that lexical sort matches numeric order
+ rename.pl 's|-|-00|; s|-00(\d\d\d)|-$1|; s|-00(\d\d)|-0$1|;' *xz
+
+ # now unpack the first 30 archives
+ mkdir subset
+ cp urlsf_subset00-0[0-2]*_data.xz subset
+ cd subset
+ find . -name "*xz" -exec tar xf {} \;
+ mkdir 10k
+ find . -name "*txt" | sort | head -10000 | xargs mv -t 10k
+ # repack just the first 10K records
+ tar cfJ 10k.xz -C 10k .
+ mkdir openwebtext-10k
+ mv 10k.xz openwebtext-10k
+ tar cfJ openwebtext-10k.tar.xz openwebtext-10k
+ # the openwebtext subdir gets created on the fly
+ aws s3 cp openwebtext-10k.tar.xz s3://datasets.huggingface.co/nlp/datasets/openwebtext/
+
+ # script
+ wget https://raw.githubusercontent.com/huggingface/datasets/master/datasets/openwebtext/openwebtext.py
+ mv openwebtext.py openwebtext-10k.py
+ perl -pi -e 's|openwebtext|openwebtext-10k|g' openwebtext-10k.py
+ perl -pi -e 's|https://zenodo.org/record/3834942/files/|https://cdn-datasets.huggingface.co/nlp/datasets/openwebtext/|g' openwebtext-10k.py
+
+ # manually check that the script is correct - edit the descriptions
+
+ # create a new dataset entry on the hub:
+ # https://huggingface.co/new-dataset
+
+ # clone it and add the files
+ git clone https://huggingface.co/datasets/stas/openwebtext-10k
+ cp openwebtext-10k.py process.txt openwebtext-10k
+ cd openwebtext-10k
+
+ git add openwebtext-10k.py process.txt
+ git commit -m "build script" openwebtext-10k.py process.txt
+ git push
+
+ # finally test
+ datasets-cli test stas/openwebtext-10k --save_infos --all_configs
+
+ # add and push the generated config
+ git add dataset_infos.json
+ git commit -m "add dataset_infos.json" dataset_infos.json
+ git push
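
After the final push, a quick end-to-end check (a hypothetical verification step, not part of the original process) can confirm the published dataset matches expectations:

# sanity-check the published dataset
from datasets import load_dataset

ds = load_dataset("stas/openwebtext-10k", split="train")
assert ds.num_rows == 10_000, ds.num_rows  # the subset holds exactly 10K records
# _generate_examples collapses runs of 3+ newlines down to 2, so none should survive
assert "\n\n\n" not in ds[0]["text"]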