Datasets: wiki_snippets
Formats: parquet
Sub-tasks: language-modeling
Languages: English
Size: 10M - 100M
Tags: text-search
License:
Commit 9e84715
Update files from the datasets library (from 1.0.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
- .gitattributes +27 -0
- dataset_infos.json +1 -0
- wiki_snippets.py +207 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
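These attribute rules route large binary artifacts (archives, model weights, Arrow/Parquet data) through Git LFS instead of plain Git. As a rough illustration only, a sketch using Python's fnmatch, which approximates git's simple "*.ext" globs but not the "saved_model/**/*" rule; the pattern subset below is transcribed from the rules above:

import fnmatch

# Simple glob patterns transcribed from the .gitattributes rules above
# (git's "**" directory semantics are not modeled here).
lfs_patterns = ["*.7z", "*.arrow", "*.bin", "*.bz2", "*.gz", "*.h5",
                "*.parquet", "*.pt", "*.pth", "*.zip", "*tfevents*"]

def routed_to_lfs(filename):
    """Return True if a filename matches one of the LFS-tracked patterns."""
    return any(fnmatch.fnmatch(filename, pat) for pat in lfs_patterns)

print(routed_to_lfs("train-00000.parquet"))  # True
print(routed_to_lfs("wiki_snippets.py"))     # False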
dataset_infos.json
ADDED
@@ -0,0 +1 @@
+{"wiki40b_en_100_0": {"description": "Wikipedia version split into plain text snippets for dense semantic indexing.\n", "citation": "@ONLINE {wikidump,\n    author = \"Wikimedia Foundation\",\n    title = \"Wikimedia Downloads\",\n    url = \"https://dumps.wikimedia.org\"\n}\n", "homepage": "https://dumps.wikimedia.org", "license": "", "features": {"_id": {"dtype": "string", "id": null, "_type": "Value"}, "datasets_id": {"dtype": "int32", "id": null, "_type": "Value"}, "wiki_id": {"dtype": "string", "id": null, "_type": "Value"}, "start_paragraph": {"dtype": "int32", "id": null, "_type": "Value"}, "start_character": {"dtype": "int32", "id": null, "_type": "Value"}, "end_paragraph": {"dtype": "int32", "id": null, "_type": "Value"}, "end_character": {"dtype": "int32", "id": null, "_type": "Value"}, "article_title": {"dtype": "string", "id": null, "_type": "Value"}, "section_title": {"dtype": "string", "id": null, "_type": "Value"}, "passage_text": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "wiki_snippets", "config_name": "wiki40b_en_100_0", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12864038411, "num_examples": 17553713, "dataset_name": "wiki_snippets"}}, "download_checksums": {}, "download_size": 0, "dataset_size": 12864038411, "size_in_bytes": 12864038411}, "wikipedia_en_100_0": {"description": "Wikipedia version split into plain text snippets for dense semantic indexing.\n", "citation": "@ONLINE {wikidump,\n    author = \"Wikimedia Foundation\",\n    title = \"Wikimedia Downloads\",\n    url = \"https://dumps.wikimedia.org\"\n}\n", "homepage": "https://dumps.wikimedia.org", "license": "", "features": {"_id": {"dtype": "string", "id": null, "_type": "Value"}, "datasets_id": {"dtype": "int32", "id": null, "_type": "Value"}, "wiki_id": {"dtype": "string", "id": null, "_type": "Value"}, "start_paragraph": {"dtype": "int32", "id": null, "_type": "Value"}, "start_character": {"dtype": "int32", "id": null, "_type": "Value"}, "end_paragraph": {"dtype": "int32", "id": null, "_type": "Value"}, "end_character": {"dtype": "int32", "id": null, "_type": "Value"}, "article_title": {"dtype": "string", "id": null, "_type": "Value"}, "section_title": {"dtype": "string", "id": null, "_type": "Value"}, "passage_text": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "wiki_snippets", "config_name": "wikipedia_en_100_0", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23837250095, "num_examples": 30820408, "dataset_name": "wiki_snippets"}}, "download_checksums": {}, "download_size": 0, "dataset_size": 23837250095, "size_in_bytes": 23837250095}}
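The single JSON line above records one DatasetInfo per config: both share the same ten features and differ only in source corpus and size (wiki40b_en_100_0: 17,553,713 snippets; wikipedia_en_100_0: 30,820,408 snippets). A minimal sketch, assuming a local checkout so dataset_infos.json is on disk, that summarizes the recorded split metadata:

import json

# Print per-config train-split sizes recorded in dataset_infos.json.
with open("dataset_infos.json") as f:
    infos = json.load(f)

for name, info in infos.items():
    train = info["splits"]["train"]
    print(f"{name}: {train['num_examples']:,} snippets, {info['dataset_size'] / 1e9:.1f} GB")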
wiki_snippets.py
ADDED
@@ -0,0 +1,207 @@
+import json
+import logging
+import math
+
+import datasets
+
+
+_CITATION = """\
+@ONLINE {wikidump,
+    author = {Wikimedia Foundation},
+    title = {Wikimedia Downloads},
+    url = {https://dumps.wikimedia.org}
+}
+"""
+
+_DESCRIPTION = """\
+Wikipedia version split into plain text snippets for dense semantic indexing.
+"""
+
+_LICENSE = (
+    "This work is licensed under the Creative Commons Attribution-ShareAlike "
+    "3.0 Unported License. To view a copy of this license, visit "
+    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
+    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
+)
+
+
+def wiki40b_article_snippets(article, passage_len=100, overlap=0):
+    paragraphs = article["text"].split("\n")
+    article_idx = paragraphs.index("_START_ARTICLE_") + 1
+    article_title = paragraphs[article_idx] if article_idx < len(paragraphs) else ""
+    section_indices = [i + 1 for i, par in enumerate(paragraphs[:-1]) if par == "_START_SECTION_"]
+    par_tabs = [par.split(" ") for par in paragraphs]
+    word_map = [  # one (paragraph index, character offset, word) triple per body word
+        (i, len(" ".join(par[:j])), w)
+        for i, par in enumerate(par_tabs)
+        if not par[0].startswith("_START_")
+        for j, w in enumerate(par)
+        if i > 0
+    ]
+    step_size = passage_len - overlap  # stride of the sliding window, in words
+    passages = []
+    for i in range(math.ceil(len(word_map) / step_size)):
+        pre_toks = word_map[i * step_size : i * step_size + passage_len]  # next window of words
+        start_section_id = max([0] + [j for j in section_indices if j <= pre_toks[0][0]])
+        section_ids = [j for j in section_indices if j >= start_section_id and j <= pre_toks[-1][0]]
+        section_ids = section_ids if len(section_ids) > 0 else [0]  # default when no section marker precedes the window
+        passage_text = " ".join([w for p_id, s_id, w in pre_toks])
+        passages += [
+            {
+                "article_title": article_title,
+                "section_title": " & ".join([paragraphs[j] for j in section_ids]),
+                "wiki_id": article["wikidata_id"],
+                "start_paragraph": pre_toks[0][0],
+                "start_character": pre_toks[0][1],
+                "end_paragraph": pre_toks[-1][0],
+                "end_character": pre_toks[-1][1] + len(pre_toks[-1][2]) + 1,
+                "passage_text": passage_text.replace("_NEWLINE_", "\n"),
+            }
+        ]
+    return passages
+
+
+def wikipedia_article_snippets(article, passage_len=100, overlap=0):
+    paragraphs = [par for par in article["text"].split("\n") if not par.startswith("Category:")]
+    if "References" in paragraphs:
+        paragraphs = paragraphs[: paragraphs.index("References")]
+    article_title = article["title"]
+    section_indices = [  # section titles: non-empty line preceded by a blank line and followed by text
+        i + 1
+        for i, par in enumerate(paragraphs[:-2])
+        if paragraphs[i] == "" and paragraphs[i + 1] != "" and paragraphs[i + 2] != ""
+    ]
+    par_tabs = [par.split(" ") for par in paragraphs]
+    word_map = [(i, len(" ".join(par[:j])), w) for i, par in enumerate(par_tabs) for j, w in enumerate(par)]
+    step_size = passage_len - overlap
+    passages = []
+    for i in range(math.ceil(len(word_map) / step_size)):
+        pre_toks = word_map[i * step_size : i * step_size + passage_len]
+        start_section_id = max([0] + [j for j in section_indices if j <= pre_toks[0][0]])
+        section_ids = [j for j in section_indices if j >= start_section_id and j <= pre_toks[-1][0]]
+        section_ids = section_ids if len(section_ids) > 0 else [-1]
+        passage_text = " ".join([w for p_id, s_id, w in pre_toks])
+        passages += [
+            {
+                "article_title": article_title,
+                "section_title": " & ".join(["Start" if j == -1 else paragraphs[j].strip() for j in section_ids]),
+                "wiki_id": article_title.replace(" ", "_"),
+                "start_paragraph": pre_toks[0][0],
+                "start_character": pre_toks[0][1],
+                "end_paragraph": pre_toks[-1][0],
+                "end_character": pre_toks[-1][1] + len(pre_toks[-1][2]) + 1,
+                "passage_text": passage_text,
+            }
+        ]
+    return passages
+
+
+_SPLIT_FUNCTION_MAP = {
+    "wikipedia": wikipedia_article_snippets,
+    "wiki40b": wiki40b_article_snippets,
+}
+
+
+def generate_snippets(wikipedia, split_function, passage_len=100, overlap=0):
+    for i, article in enumerate(wikipedia):
+        for doc in split_function(article, passage_len, overlap):
+            part_id = json.dumps(
+                {
+                    "datasets_id": i,
+                    "wiki_id": doc["wiki_id"],
+                    "sp": doc["start_paragraph"],
+                    "sc": doc["start_character"],
+                    "ep": doc["end_paragraph"],
+                    "ec": doc["end_character"],
+                }
+            )
+            doc["_id"] = part_id  # stable id: JSON-encoded span coordinates
+            doc["datasets_id"] = i
+            yield doc
+
+
+class WikiSnippetsConfig(datasets.BuilderConfig):
+    """BuilderConfig for WikiSnippets."""
+
+    def __init__(
+        self, wikipedia_name="wiki40b", wikipedia_version_name="en", snippets_length=100, snippets_overlap=0, **kwargs
+    ):
+        """BuilderConfig for WikiSnippets.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(WikiSnippetsConfig, self).__init__(**kwargs)
+        self.wikipedia_name = wikipedia_name
+        self.wikipedia_version_name = wikipedia_version_name
+        self.snippets_length = snippets_length
+        self.snippets_overlap = snippets_overlap
+
+
+class WikiSnippets(datasets.GeneratorBasedBuilder):
+    BUILDER_CONFIG_CLASS = WikiSnippetsConfig
+    BUILDER_CONFIGS = [
+        WikiSnippetsConfig(
+            name="wiki40b_en_100_0",
+            version=datasets.Version("1.0.0"),
+            wikipedia_name="wiki40b",
+            wikipedia_version_name="en",
+            snippets_length=100,
+            snippets_overlap=0,
+        ),
+        WikiSnippetsConfig(
+            name="wikipedia_en_100_0",
+            version=datasets.Version("1.0.0"),
+            wikipedia_name="wikipedia",
+            wikipedia_version_name="20200501.en",
+            snippets_length=100,
+            snippets_overlap=0,
+        ),
+    ]
+
+    test_dummy_data = False
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "_id": datasets.Value("string"),
+                    "datasets_id": datasets.Value("int32"),
+                    "wiki_id": datasets.Value("string"),
+                    "start_paragraph": datasets.Value("int32"),
+                    "start_character": datasets.Value("int32"),
+                    "end_paragraph": datasets.Value("int32"),
+                    "end_character": datasets.Value("int32"),
+                    "article_title": datasets.Value("string"),
+                    "section_title": datasets.Value("string"),
+                    "passage_text": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=None,
+            homepage="https://dumps.wikimedia.org",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+
+        wikipedia = datasets.load_dataset(
+            path=self.config.wikipedia_name,
+            name=self.config.wikipedia_version_name,
+        )
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"wikipedia": wikipedia}),
+        ]
+
+    def _generate_examples(self, wikipedia):
+        logging.info(
+            "generating examples from = {} {}".format(self.config.wikipedia_name, self.config.wikipedia_version_name)
+        )
+        for split in wikipedia:
+            dset = wikipedia[split]
+            split_function = _SPLIT_FUNCTION_MAP[self.config.wikipedia_name]
+            for doc in generate_snippets(
+                dset, split_function, passage_len=self.config.snippets_length, overlap=self.config.snippets_overlap
+            ):
+                id_ = doc["_id"]
+                yield id_, doc
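To see what the sliding-window splitters above actually produce, here is a toy run of wikipedia_article_snippets; the article dict and the tiny passage_len are made up for illustration, and the import assumes wiki_snippets.py is on the Python path:

from wiki_snippets import wikipedia_article_snippets

# A made-up article: intro paragraph, then a section title
# ("History") preceded by a blank line, then body text.
toy_article = {
    "title": "Example",
    "text": "Intro sentence words here\n\nHistory\nOne two three four five six seven eight nine ten",
}

for snip in wikipedia_article_snippets(toy_article, passage_len=4, overlap=1):
    print(snip["section_title"], "->", snip["passage_text"])

With passage_len=4 and overlap=1 the window advances three words at a time, so consecutive passages share one word; the first passage is attributed to "Start" because no section marker precedes it.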
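End to end, the builder is consumed like any other datasets script; note that _split_generators itself calls datasets.load_dataset on the source corpus, so building a config materializes the full underlying Wikipedia dump first. A minimal sketch (config names come from BUILDER_CONFIGS above; expect a large download and long preparation time):

import datasets

# Build the wiki40b-based config; this first downloads and prepares
# the underlying wiki40b corpus before generating snippets.
wiki = datasets.load_dataset("wiki_snippets", "wiki40b_en_100_0", split="train")

print(wiki)                           # features match _info() above
print(wiki[0]["passage_text"][:200])  # start of the first 100-word snippet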