albertvillanova committed on
Commit b2cb1e0
1 Parent(s): 22bacfc

Delete loading script
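With the loading script gone, the dataset presumably loads from pre-built data files hosted in the repo. Assuming those files are present, usage reduces to a plain `load_dataset` call; a minimal sketch (the config name is taken from the deleted script below):

    from datasets import load_dataset

    # Assumes the Hub repo still ships pre-processed data files for this config.
    ds = load_dataset("wiki_snippets", "wiki40b_en_100_0")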

Files changed (1)
  1. wiki_snippets.py +0 -210
wiki_snippets.py DELETED
@@ -1,210 +0,0 @@
-# WARNING: Please, do not use the code in this script as a template to create another script:
-# - It is a bad practice to use `datasets.load_dataset` inside a loading script. Please, avoid doing it.
-
-import json
-import math
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@ONLINE {wikidump,
-    author = {Wikimedia Foundation},
-    title = {Wikimedia Downloads},
-    url = {https://dumps.wikimedia.org}
-}
-"""
-
-_DESCRIPTION = """\
-Wikipedia version split into plain text snippets for dense semantic indexing.
-"""
-
-_LICENSE = (
-    "This work is licensed under the Creative Commons Attribution-ShareAlike "
-    "3.0 Unported License. To view a copy of this license, visit "
-    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
-    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
-)
-
-
-def wiki40b_article_snippets(article, passage_len=100, overlap=0):
-    paragraphs = article["text"].split("\n")
-    article_idx = paragraphs.index("_START_ARTICLE_") + 1
-    article_title = paragraphs[article_idx] if article_idx < len(paragraphs) else ""
-    section_indices = [i + 1 for i, par in enumerate(paragraphs[:-1]) if par == "_START_SECTION_"]
-    par_tabs = [par.split(" ") for par in paragraphs]
-    word_map = [
-        (i, len(" ".join(par[:j])), w)
-        for i, par in enumerate(par_tabs)
-        if not par[0].startswith("_START_")
-        for j, w in enumerate(par)
-        if i > 0
-    ]
-    step_size = passage_len - overlap
-    passages = []
-    for i in range(math.ceil(len(word_map) / step_size)):
-        pre_toks = word_map[i * step_size : i * step_size + passage_len]
-        start_section_id = max([0] + [j for j in section_indices if j <= pre_toks[0][0]])
-        section_ids = [j for j in section_indices if j >= start_section_id and j <= pre_toks[-1][0]]
-        section_ids = section_ids if len(section_ids) > 0 else [0]
-        passage_text = " ".join([w for p_id, s_id, w in pre_toks])
-        passages += [
-            {
-                "article_title": article_title,
-                "section_title": " & ".join([paragraphs[j] for j in section_ids]),
-                "wiki_id": article["wikidata_id"],
-                "start_paragraph": pre_toks[0][0],
-                "start_character": pre_toks[0][1],
-                "end_paragraph": pre_toks[-1][0],
-                "end_character": pre_toks[-1][1] + len(pre_toks[-1][2]) + 1,
-                "passage_text": passage_text.replace("_NEWLINE_", "\n"),
-            }
-        ]
-    return passages
-
-
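For orientation, the windowing in wiki40b_article_snippets advances by step_size = passage_len - overlap, so consecutive passages share exactly `overlap` words. A minimal sketch of the same slice arithmetic with toy values:

    import math

    words = ["a", "b", "c", "d", "e"]
    passage_len, overlap = 3, 1
    step_size = passage_len - overlap
    # Same slicing as the function above: windows of passage_len words, advancing by step_size.
    windows = [words[i * step_size : i * step_size + passage_len] for i in range(math.ceil(len(words) / step_size))]
    # -> [["a", "b", "c"], ["c", "d", "e"], ["e"]]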
-def wikipedia_article_snippets(article, passage_len=100, overlap=0):
-    paragraphs = [par for par in article["text"].split("\n") if not par.startswith("Category:")]
-    if "References" in paragraphs:
-        paragraphs = paragraphs[: paragraphs.index("References")]
-    article_title = article["title"]
-    section_indices = [
-        i + 1
-        for i, par in enumerate(paragraphs[:-2])
-        if paragraphs[i] == "" and paragraphs[i + 1] != "" and paragraphs[i + 2] != ""
-    ]
-    par_tabs = [par.split(" ") for par in paragraphs]
-    word_map = [(i, len(" ".join(par[:j])), w) for i, par in enumerate(par_tabs) for j, w in enumerate(par)]
-    step_size = passage_len - overlap
-    passages = []
-    for i in range(math.ceil(len(word_map) / step_size)):
-        pre_toks = word_map[i * step_size : i * step_size + passage_len]
-        start_section_id = max([0] + [j for j in section_indices if j <= pre_toks[0][0]])
-        section_ids = [j for j in section_indices if start_section_id <= j <= pre_toks[-1][0]]
-        section_ids = section_ids if len(section_ids) > 0 else [-1]
-        passage_text = " ".join([w for p_id, s_id, w in pre_toks])
-        passages += [
-            {
-                "article_title": article_title,
-                "section_title": " & ".join(["Start" if j == -1 else paragraphs[j].strip() for j in section_ids]),
-                "wiki_id": article_title.replace(" ", "_"),
-                "start_paragraph": pre_toks[0][0],
-                "start_character": pre_toks[0][1],
-                "end_paragraph": pre_toks[-1][0],
-                "end_character": pre_toks[-1][1] + len(pre_toks[-1][2]) + 1,
-                "passage_text": passage_text,
-            }
-        ]
-    return passages
-
-
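The section heuristic in wikipedia_article_snippets treats a paragraph as a section title when it follows an empty line and is itself followed by a non-empty line. A toy illustration of the same comprehension:

    paragraphs = ["Intro text.", "", "History", "Founded in 1900.", "It grew quickly."]
    section_indices = [
        i + 1
        for i, par in enumerate(paragraphs[:-2])
        if paragraphs[i] == "" and paragraphs[i + 1] != "" and paragraphs[i + 2] != ""
    ]
    # -> [2], i.e. "History" is taken as a section title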
-_SPLIT_FUNCTION_MAP = {
-    "wikipedia": wikipedia_article_snippets,
-    "wiki40b": wiki40b_article_snippets,
-}
-
-
-def generate_snippets(wikipedia, split_function, passage_len=100, overlap=0):
-    for i, article in enumerate(wikipedia):
-        for doc in split_function(article, passage_len, overlap):
-            part_id = json.dumps(
-                {
-                    "datasets_id": i,
-                    "wiki_id": doc["wiki_id"],
-                    "sp": doc["start_paragraph"],
-                    "sc": doc["start_character"],
-                    "ep": doc["end_paragraph"],
-                    "ec": doc["end_character"],
-                }
-            )
-            doc["_id"] = part_id
-            doc["datasets_id"] = i
-            yield doc
-
-
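Each yielded snippet carries a JSON `_id` that pinpoints it inside its source article. A hypothetical example of the string generate_snippets produces (all values invented for illustration):

    import json

    part_id = json.dumps(
        {"datasets_id": 0, "wiki_id": "Q1234", "sp": 2, "sc": 0, "ep": 4, "ec": 610}
    )
    # -> '{"datasets_id": 0, "wiki_id": "Q1234", "sp": 2, "sc": 0, "ep": 4, "ec": 610}'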
-class WikiSnippetsConfig(datasets.BuilderConfig):
-    """BuilderConfig for WikiSnippets."""
-
-    def __init__(
-        self, wikipedia_name="wiki40b", wikipedia_version_name="en", snippets_length=100, snippets_overlap=0, **kwargs
-    ):
-        """BuilderConfig for WikiSnippets.
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(WikiSnippetsConfig, self).__init__(**kwargs)
-        self.wikipedia_name = wikipedia_name
-        self.wikipedia_version_name = wikipedia_version_name
-        self.snippets_length = snippets_length
-        self.snippets_overlap = snippets_overlap
-
-
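While the script was in place, the snippet parameters could presumably be overridden at load time, since `load_dataset` forwards extra keyword arguments to the builder config. A sketch with invented values:

    from datasets import load_dataset

    # snippets_length and snippets_overlap are the WikiSnippetsConfig attributes above.
    ds = load_dataset("wiki_snippets", "wiki40b_en_100_0", snippets_length=150, snippets_overlap=20)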
-class WikiSnippets(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIG_CLASS = WikiSnippetsConfig
-    BUILDER_CONFIGS = [
-        WikiSnippetsConfig(
-            name="wiki40b_en_100_0",
-            version=datasets.Version("1.0.0"),
-            wikipedia_name="wiki40b",
-            wikipedia_version_name="en",
-            snippets_length=100,
-            snippets_overlap=0,
-        ),
-        WikiSnippetsConfig(
-            name="wikipedia_en_100_0",
-            version=datasets.Version("2.0.0"),
-            wikipedia_name="wikipedia",
-            wikipedia_version_name="20220301.en",
-            snippets_length=100,
-            snippets_overlap=0,
-        ),
-    ]
-
-    test_dummy_data = False
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "_id": datasets.Value("string"),
-                    "datasets_id": datasets.Value("int32"),
-                    "wiki_id": datasets.Value("string"),
-                    "start_paragraph": datasets.Value("int32"),
-                    "start_character": datasets.Value("int32"),
-                    "end_paragraph": datasets.Value("int32"),
-                    "end_character": datasets.Value("int32"),
-                    "article_title": datasets.Value("string"),
-                    "section_title": datasets.Value("string"),
-                    "passage_text": datasets.Value("string"),
-                }
-            ),
-            supervised_keys=None,
-            homepage="https://dumps.wikimedia.org",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        # WARNING: It is a bad practice to use `datasets.load_dataset` inside a loading script. Please, avoid doing it.
-        wikipedia = datasets.load_dataset(
-            path=self.config.wikipedia_name,
-            name=self.config.wikipedia_version_name,
-        )
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"wikipedia": wikipedia}),
-        ]
-
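For contrast with the discouraged pattern above: a conventional loading script fetches its raw sources through the download manager rather than calling datasets.load_dataset. A generic sketch, assuming the usual `import datasets` and with a hypothetical URL standing in for a real source:

    def _split_generators(self, dl_manager):
        # Standard pattern: let the download manager fetch and cache the raw data.
        data_path = dl_manager.download_and_extract("https://example.com/data.jsonl.gz")  # hypothetical URL
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_path}),
        ]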
-    def _generate_examples(self, wikipedia):
-        logger.info(f"generating examples from = {self.config.wikipedia_name} {self.config.wikipedia_version_name}")
-        for split in wikipedia:
-            dset = wikipedia[split]
-            split_function = _SPLIT_FUNCTION_MAP[self.config.wikipedia_name]
-            for doc in generate_snippets(
-                dset, split_function, passage_len=self.config.snippets_length, overlap=self.config.snippets_overlap
-            ):
-                id_ = doc["_id"]
-                yield id_, doc