system (HF staff) committed on
Commit
80d4f4b
0 Parent(s):

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4)
  1. .gitattributes +27 -0
  2. dataset_infos.json +430 -0
  3. dummy/0.0.0/dummy_data.zip +3 -0
  4. wiki_dpr.py +190 -0
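
For context, a minimal usage sketch for the dataset script added by this commit (not part of the commit itself): the keyword arguments simply mirror `WikiDprConfig` in `wiki_dpr.py` below, and a `datasets` 1.0.0-era environment is assumed.

```python
# Usage sketch only (not part of this commit); assumes `pip install datasets` at ~1.0.0.
from datasets import load_dataset

# Lightest configuration: passages only, no DPR embeddings and no faiss index.
# These kwargs are forwarded to WikiDprConfig defined in wiki_dpr.py below.
passages = load_dataset("wiki_dpr", with_embeddings=False, with_index=False, split="train")
print(passages[0]["id"], passages[0]["title"])

# The full configuration (with_embeddings=True, with_index=True) additionally downloads
# the 50 DPR embedding shards listed in dataset_infos.json (~71 GB according to its download_size).
```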
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1,430 @@
+ {
+ "psgs_w100_with_nq_embeddings": {
+ "description": "\nThis is the wikipedia split used to evaluate the Dense Passage Retrieval (DPR) model.\nIt contains 21M passages from wikipedia along with their DPR embeddings.\nThe wikipedia articles were split into multiple, disjoint text blocks of 100 words as passages.\n",
+ "citation": "\n@misc{karpukhin2020dense,\n title={Dense Passage Retrieval for Open-Domain Question Answering},\n author={Vladimir Karpukhin and Barlas Oğuz and Sewon Min and Patrick Lewis and Ledell Wu and Sergey Edunov and Danqi Chen and Wen-tau Yih},\n year={2020},\n eprint={2004.04906},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+ "homepage": "https://github.com/facebookresearch/DPR",
+ "license": "",
+ "features": {
+ "id": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "text": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "title": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "embeddings": {
+ "feature": {
+ "dtype": "float32",
+ "id": null,
+ "_type": "Value"
+ },
+ "length": -1,
+ "id": null,
+ "_type": "Sequence"
+ }
+ },
+ "supervised_keys": null,
+ "builder_name": "wiki_dpr",
+ "config_name": "psgs_w100_with_nq_embeddings",
+ "version": {
+ "version_str": "1.0.0",
+ "description": null,
+ "datasets_version_to_prepare": null,
+ "major": 1,
+ "minor": 0,
+ "patch": 0
+ },
+ "splits": {
+ "train": {
+ "name": "train",
+ "num_bytes": 78419281788,
+ "num_examples": 21015300,
+ "dataset_name": "wiki_dpr"
+ }
+ },
+ "download_checksums": {
+ "https://dl.fbaipublicfiles.com/dpr/wikipedia_split/psgs_w100.tsv.gz": {
+ "num_bytes": 4694541059,
+ "checksum": "c39b020c855a2b5c25ffef3abe4a3b6f9b829ad7dbc14ec3d163d34d7c53ea8d"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_0": {
+ "num_bytes": 1324693633,
+ "checksum": "c277bc2ba1ef027ee86cb24b1df7015aaa32972da88d5183f6e5a9563d8705b5"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_1": {
+ "num_bytes": 1324804738,
+ "checksum": "9bcfb323ccb0c0f8ed5de949287129c630bfee52738cd2ae232a52eaa6cf6faf"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_2": {
+ "num_bytes": 1325065657,
+ "checksum": "c66c771b0363ceb78e19f7fdc7348159c9b4d548837b3856da5d867811f42191"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_3": {
+ "num_bytes": 1325225044,
+ "checksum": "3659fccf01a2640fcc5da35161b7afb89ea669f295b7570d78678afd32c8b9b4"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_4": {
+ "num_bytes": 1325225044,
+ "checksum": "be0aeb8110c57ee72c9560b857cdac192a23c56f42f1d8bcbdc89819f665b2cc"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_5": {
+ "num_bytes": 1325225044,
+ "checksum": "9a108bcc265d44c93dbbbaddddc719812749dd29d9a0a28d1947de723771ab75"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_6": {
+ "num_bytes": 1325225044,
+ "checksum": "eebc7349ad3fb2fd739bc88b2032767ad2b6efefb3089118a80b1218379b17fb"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_7": {
+ "num_bytes": 1325225044,
+ "checksum": "be6f7e8363a6efc79c0605b8ef0fce81d786982aa0f25b57ca906301db265771"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_8": {
+ "num_bytes": 1325225044,
+ "checksum": "644d4ae3a377dd863187b87cbb060790d175eecae78f19d23a63ec3f863dc38d"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_9": {
+ "num_bytes": 1325225044,
+ "checksum": "5eb67cc2317a10a7a787e3557295f5403132db57d7dcb71dde3a69af7d93a51f"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_10": {
+ "num_bytes": 1325225044,
+ "checksum": "321769679debd5824333cb81e2ea0337c40b9e02c2ee7ceed8eb301d5bb5b3dc"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_11": {
+ "num_bytes": 1325225044,
+ "checksum": "9dccb04ce9f274907889cc1d42beb5272704b6a894bcd3548457bfe04e54e906"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_12": {
+ "num_bytes": 1325225044,
+ "checksum": "ab5c1550f2fe9af15767920b9f31c7fcb3d5f5720426a2531ee6097e598589ca"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_13": {
+ "num_bytes": 1325225044,
+ "checksum": "2fffbba9459d8a6800927c24bdc167ef7217807b4bab4c811dfd86f7dc3255ef"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_14": {
+ "num_bytes": 1325225044,
+ "checksum": "141451e6fc8e1dfa6bb4dc0ef915b87115e933503b762991da043e180ab06a76"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_15": {
+ "num_bytes": 1325225044,
+ "checksum": "638177a8ecaa6941be2e59719d59b3b5f6494e81cefd922c39168b9b4a74d92d"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_16": {
+ "num_bytes": 1325225044,
+ "checksum": "d4f6b2a67938e3c86e96d7edb524d02b209e3035118ee5edd6b2214defa037a2"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_17": {
+ "num_bytes": 1325225044,
+ "checksum": "695b8c287543f67e80f9f7be32f37b75ccc66113ed9e53e24dd6f764a3a99e5a"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_18": {
+ "num_bytes": 1325225044,
+ "checksum": "1009ee62bdfd873a315641fead549c65b211e8fa994c72e280fecfc0d4869ac9"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_19": {
+ "num_bytes": 1325225044,
+ "checksum": "a8bcc7039c26533bb7bc503d52b214459d9a8d2a3c5ccdd8fe514dfa1f41dccf"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_20": {
+ "num_bytes": 1325225044,
+ "checksum": "88c92e6c8a901e5c4d87721771b490365c19ba037e35ef46d754f20179e2523a"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_21": {
+ "num_bytes": 1325225044,
+ "checksum": "74cd7c267b88c226f56b163cb18935da55feba4bcf7f170aab96c92a3c305b7e"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_22": {
+ "num_bytes": 1325225044,
+ "checksum": "aa636b4ee26ee8e15bd02e47ebc8be29c3417a9b2d8c464523f9758d88426c3a"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_23": {
+ "num_bytes": 1325312389,
+ "checksum": "5875f08a5a07716dd0e1aa77d434e10eb2b96c46ce7e975976c8249c3ff829c2"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_24": {
+ "num_bytes": 1325645350,
+ "checksum": "dbb94b01128dd54f069f4358b56139c92cd143ae2ead3bfc3a496edd10d004d4"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_25": {
+ "num_bytes": 1325645350,
+ "checksum": "cc3f67bcf59df92f8138ee9ac2691f95b01c0d0d21ac7fa609cad4b80ebc889b"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_26": {
+ "num_bytes": 1325645350,
+ "checksum": "8831a3070780342f281bead5822108eaed2d1af8d99806b40c2c72d5e6a82cbf"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_27": {
+ "num_bytes": 1325645350,
+ "checksum": "979d2c3ead22e144a4c5f8391021b80358c5060f9535f3beab3e5a72f8a62f73"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_28": {
+ "num_bytes": 1325645350,
+ "checksum": "6f8ebd342025c77bd08e36937d8e8d5e5745756854d86a89ffd41063f0c16d2d"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_29": {
+ "num_bytes": 1325645350,
+ "checksum": "e2a1df59fdc22715b0826af0efeed5019d4e84c0b2542c3cc35a1cd127052705"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_30": {
+ "num_bytes": 1325645350,
+ "checksum": "3a69a8de887737783b2f1289b2d849cc1d8866a27fe1627cb725aa3cd526b240"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_31": {
+ "num_bytes": 1325645350,
+ "checksum": "5f707e66d0164fda1d75621ae64ccc3af97d4d07d0f1adf7de5f307128824591"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_32": {
+ "num_bytes": 1325645350,
+ "checksum": "a964d6f34f461015de096790dfef229304a730cfeb47de89ba71bee47d57403d"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_33": {
+ "num_bytes": 1325645350,
+ "checksum": "5c993188956e09e694b2e63f766b0af23ded9d4be74bc0776949ea3b0ab29ce2"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_34": {
+ "num_bytes": 1325645350,
+ "checksum": "8dd3d1888c751a05894176040beed7ea5ef782807585816461f708766d93b71f"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_35": {
+ "num_bytes": 1325645350,
+ "checksum": "51578bc9550874d455fc193830e5bada0200509516461aeff606e3e4bf173397"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_36": {
+ "num_bytes": 1325645350,
+ "checksum": "ac527cd094b87d7a0feebe395e1a76d015cd816f46c0fec6b9fc6cf81a46a7bf"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_37": {
+ "num_bytes": 1325645350,
+ "checksum": "70fa0fd6b2c536bf321cbcaf0a8ac9089308d30ee64dee1fc98b8d64c3a30c21"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_38": {
+ "num_bytes": 1325645350,
+ "checksum": "824bce4e5f347eb101cc1e7b8ae0e6a0c826b8bc52f8bdaa9cff306291c91c03"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_39": {
+ "num_bytes": 1325645350,
+ "checksum": "bd7ed88277ba3b709c0a2a4ed6e96a86ed56ab3f7be589daa90ca4853f521737"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_40": {
+ "num_bytes": 1325645350,
+ "checksum": "09cc45681ff15f3351718426987e78725176d2071b4112bb728c6c633c4a4188"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_41": {
+ "num_bytes": 1325645350,
+ "checksum": "2151142c79ebf73219e324632fa60b535507c530d3ad3aa1ed9482488ac96661"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_42": {
+ "num_bytes": 1325645350,
+ "checksum": "fd1b1a1d714e058c5d7380e0046a51720135965a342a0d0be79e6ee8a2c07308"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_43": {
+ "num_bytes": 1325645350,
+ "checksum": "8d6f525ded1df8b6b7026ee77169e1301bc032102ac1927d6485de057bbcdedd"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_44": {
+ "num_bytes": 1325645350,
+ "checksum": "479d7d37ce8ff59c16a5ff8fe6c23a3e95d293c407d06afe3952c4b27f353210"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_45": {
+ "num_bytes": 1325645350,
+ "checksum": "3c10f56309521ed53a7dc617e80b0030c4e2ac82273e9a4047653c09dd8bd733"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_46": {
+ "num_bytes": 1325645350,
+ "checksum": "3822cfb8fc4003d31816a568426b4d8ce5ba2bada3426938d3797a7e120b7624"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_47": {
+ "num_bytes": 1325645350,
+ "checksum": "c82720464b2bf32f77c11e08981a5d02e1222780fbf7104d2301e383d0716851"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_48": {
+ "num_bytes": 1325645350,
+ "checksum": "5bf11344b81fb0de80c7e729d545fd9f09c34fbdb02fe5f4ae308e2572912782"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_49": {
+ "num_bytes": 1325645350,
+ "checksum": "652ebd7b0528f21ee59ab90de9a6d64a4ca12e0e9e36ce02f2fac7c1ed5587bc"
+ }
+ },
+ "download_size": 70965697456,
+ "dataset_size": 78419281788,
+ "size_in_bytes": 149384979244
+ },
+ "dummy_psgs_w100_no_embeddings": {
+ "description": "\nThis is the wikipedia split used to evaluate the Dense Passage Retrieval (DPR) model.\nIt contains 21M passages from wikipedia along with their DPR embeddings.\nThe wikipedia articles were split into multiple, disjoint text blocks of 100 words as passages.\n",
+ "citation": "\n@misc{karpukhin2020dense,\n title={Dense Passage Retrieval for Open-Domain Question Answering},\n author={Vladimir Karpukhin and Barlas Oğuz and Sewon Min and Patrick Lewis and Ledell Wu and Sergey Edunov and Danqi Chen and Wen-tau Yih},\n year={2020},\n eprint={2004.04906},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+ "homepage": "https://github.com/facebookresearch/DPR",
+ "license": "",
+ "features": {
+ "id": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "text": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "title": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ }
+ },
+ "supervised_keys": null,
+ "builder_name": "wiki_dpr",
+ "config_name": "dummy_psgs_w100_no_embeddings",
+ "version": {
+ "version_str": "1.0.0",
+ "description": null,
+ "datasets_version_to_prepare": null,
+ "major": 1,
+ "minor": 0,
+ "patch": 0
+ },
+ "splits": {
+ "train": {
+ "name": "train",
+ "num_bytes": 6562601,
+ "num_examples": 10000,
+ "dataset_name": "wiki_dpr"
+ }
+ },
+ "download_checksums": {
+ "https://dl.fbaipublicfiles.com/dpr/wikipedia_split/psgs_w100.tsv.gz": {
+ "num_bytes": 4694541059,
+ "checksum": "c39b020c855a2b5c25ffef3abe4a3b6f9b829ad7dbc14ec3d163d34d7c53ea8d"
+ }
+ },
+ "download_size": 4694541059,
+ "dataset_size": 6562601,
+ "size_in_bytes": 4701103660
+ },
+ "dummy_psgs_w100_with_nq_embeddings": {
+ "description": "\nThis is the wikipedia split used to evaluate the Dense Passage Retrieval (DPR) model.\nIt contains 21M passages from wikipedia along with their DPR embeddings.\nThe wikipedia articles were split into multiple, disjoint text blocks of 100 words as passages.\n",
+ "citation": "\n@misc{karpukhin2020dense,\n title={Dense Passage Retrieval for Open-Domain Question Answering},\n author={Vladimir Karpukhin and Barlas Oğuz and Sewon Min and Patrick Lewis and Ledell Wu and Sergey Edunov and Danqi Chen and Wen-tau Yih},\n year={2020},\n eprint={2004.04906},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+ "homepage": "https://github.com/facebookresearch/DPR",
+ "license": "",
+ "features": {
+ "id": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "text": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "title": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "embeddings": {
+ "feature": {
+ "dtype": "float32",
+ "id": null,
+ "_type": "Value"
+ },
+ "length": -1,
+ "id": null,
+ "_type": "Sequence"
+ }
+ },
+ "supervised_keys": null,
+ "builder_name": "wiki_dpr",
+ "config_name": "dummy_psgs_w100_with_nq_embeddings",
+ "version": {
+ "version_str": "1.0.0",
+ "description": null,
+ "datasets_version_to_prepare": null,
+ "major": 1,
+ "minor": 0,
+ "patch": 0
+ },
+ "splits": {
+ "train": {
+ "name": "train",
+ "num_bytes": 37322605,
+ "num_examples": 10000,
+ "dataset_name": "wiki_dpr"
+ }
+ },
+ "download_checksums": {
+ "https://dl.fbaipublicfiles.com/dpr/wikipedia_split/psgs_w100.tsv.gz": {
+ "num_bytes": 4694541059,
+ "checksum": "c39b020c855a2b5c25ffef3abe4a3b6f9b829ad7dbc14ec3d163d34d7c53ea8d"
+ },
+ "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_0": {
+ "num_bytes": 1324693633,
+ "checksum": "c277bc2ba1ef027ee86cb24b1df7015aaa32972da88d5183f6e5a9563d8705b5"
+ }
+ },
+ "download_size": 6019234692,
+ "dataset_size": 37322605,
+ "size_in_bytes": 6056557297
+ },
+ "psgs_w100_no_embeddings": {
+ "description": "\nThis is the wikipedia split used to evaluate the Dense Passage Retrieval (DPR) model.\nIt contains 21M passages from wikipedia along with their DPR embeddings.\nThe wikipedia articles were split into multiple, disjoint text blocks of 100 words as passages.\n",
+ "citation": "\n@misc{karpukhin2020dense,\n title={Dense Passage Retrieval for Open-Domain Question Answering},\n author={Vladimir Karpukhin and Barlas Oğuz and Sewon Min and Patrick Lewis and Ledell Wu and Sergey Edunov and Danqi Chen and Wen-tau Yih},\n year={2020},\n eprint={2004.04906},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+ "homepage": "https://github.com/facebookresearch/DPR",
+ "license": "",
+ "features": {
+ "id": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "text": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "title": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ }
+ },
+ "supervised_keys": null,
+ "builder_name": "wiki_dpr",
+ "config_name": "psgs_w100_no_embeddings",
+ "version": {
+ "version_str": "1.0.0",
+ "description": null,
+ "datasets_version_to_prepare": null,
+ "major": 1,
+ "minor": 0,
+ "patch": 0
+ },
+ "splits": {
+ "train": {
+ "name": "train",
+ "num_bytes": 13776210580,
+ "num_examples": 21015300,
+ "dataset_name": "wiki_dpr"
+ }
+ },
+ "download_checksums": {
+ "https://dl.fbaipublicfiles.com/dpr/wikipedia_split/psgs_w100.tsv.gz": {
+ "num_bytes": 4694541059,
+ "checksum": "c39b020c855a2b5c25ffef3abe4a3b6f9b829ad7dbc14ec3d163d34d7c53ea8d"
+ }
+ },
+ "download_size": 4694541059,
+ "dataset_size": 13776210580,
+ "size_in_bytes": 18470751639
+ }
+ }
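
The JSON above is large and repetitive, so here is a short sketch (standard library only; the path is assumed to be the repo-local `dataset_infos.json`) that summarizes what it records for each configuration:

```python
# Summarize the configurations described in dataset_infos.json.
import json

with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for config_name, info in infos.items():
    train = info["splits"]["train"]
    feature_names = ", ".join(info["features"])
    print(
        f"{config_name}: {train['num_examples']:,} examples, "
        f"{info['download_size'] / 1e9:.1f} GB to download, features: {feature_names}"
    )
```

Run against this file, it lists the four configurations (`psgs_w100_with_nq_embeddings`, `dummy_psgs_w100_no_embeddings`, `dummy_psgs_w100_with_nq_embeddings`, `psgs_w100_no_embeddings`) together with their train split sizes and download sizes.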
dummy/0.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21f78c610a9931eba911716ed4d8b0d28357ed97146156de58936737961c52f7
+ size 1909
wiki_dpr.py ADDED
@@ -0,0 +1,190 @@
+ import logging
+ import os
+ 
+ import numpy as np
+ 
+ import datasets
+ 
+ 
+ _CITATION = """
+ @misc{karpukhin2020dense,
+     title={Dense Passage Retrieval for Open-Domain Question Answering},
+     author={Vladimir Karpukhin and Barlas Oğuz and Sewon Min and Patrick Lewis and Ledell Wu and Sergey Edunov and Danqi Chen and Wen-tau Yih},
+     year={2020},
+     eprint={2004.04906},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+ 
+ _DESCRIPTION = """
+ This is the wikipedia split used to evaluate the Dense Passage Retrieval (DPR) model.
+ It contains 21M passages from wikipedia along with their DPR embeddings.
+ The wikipedia articles were split into multiple, disjoint text blocks of 100 words as passages.
+ """
+ 
+ _LICENSE = """DPR is CC-BY-NC 4.0 licensed."""
+ 
+ _DATA_URL = "https://dl.fbaipublicfiles.com/dpr/wikipedia_split/psgs_w100.tsv.gz"
+ 
+ _VECTORS_URL = "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/nq/wiki_passages_{i}"
+ 
+ _INDEX_URL = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr"
+ 
+ 
+ class WikiDprConfig(datasets.BuilderConfig):
+     """BuilderConfig for WikiDpr."""
+ 
+     def __init__(
+         self,
+         with_embeddings=True,
+         with_index=True,
+         wiki_split="psgs_w100",
+         embeddings_name="nq",
+         index_name="compressed",
+         index_train_size=262144,
+         dummy=False,
+         **kwargs,
+     ):
+         """BuilderConfig for WikiDpr.
+         Args:
+             with_embeddings (`bool`, defaults to `True`): Load the 768-dimensional embeddings from DPR trained on NQ.
+             with_index (`bool`, defaults to `True`): Load the faiss index trained on the embeddings.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         self.with_embeddings = with_embeddings
+         self.with_index = with_index
+         self.wiki_split = wiki_split
+         self.embeddings_name = embeddings_name if with_embeddings else "no_embeddings"
+         self.index_name = index_name if with_index else "no_index"
+         self.index_train_size = index_train_size
+         self.dummy = dummy
+         name = [self.wiki_split, self.embeddings_name, self.index_name]
+         if self.dummy:
+             name = ["dummy"] + name
+             assert (
+                 self.index_name != "compressed" or not self.with_index
+             ), "Please use `index_name='exact'` for dummy wiki_dpr"
+         kwargs["name"] = ".".join(name)
+         super(WikiDprConfig, self).__init__(**kwargs)
+ 
+         if self.index_name == "exact":
+             self.index_file = "psgs_w100.nq.IndexHNSWFlat-IP-{split}.faiss"
+         else:
+             self.index_file = "psgs_w100.nq.IVFPQ4096_HNSW32_PQ64-IP-{split}.faiss"
+         if self.dummy:
+             self.index_file = "dummy." + self.index_file
+ 
+ 
+ class WikiDpr(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIG_CLASS = WikiDprConfig
+ 
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "embeddings": datasets.Sequence(datasets.Value("float32")),
+                 }
+             )
+             if self.config.with_embeddings
+             else datasets.Features(
+                 {"id": datasets.Value("string"), "text": datasets.Value("string"), "title": datasets.Value("string")}
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/facebookresearch/DPR",
+             citation=_CITATION,
+         )
+ 
+     def _split_generators(self, dl_manager):
+         files_to_download = {"data_file": _DATA_URL}
+         downloaded_files = dl_manager.download_and_extract(files_to_download)
+         if self.config.with_embeddings:
+             if self.config.dummy:
+                 downloaded_files["vectors_files"] = dl_manager.download([_VECTORS_URL.format(i=0)])
+             else:
+                 downloaded_files["vectors_files"] = dl_manager.download([_VECTORS_URL.format(i=i) for i in range(50)])
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=downloaded_files),
+         ]
+ 
+     def _generate_examples(self, data_file, vectors_files=None):
+         vec_idx = 0
+         vecs = []
+         lines = open(data_file, "r", encoding="utf-8")
+         next(lines)  # skip the header row
+         for i, line in enumerate(lines):
+             if self.config.dummy and i == 10000:
+                 break
+             if i == 21015300:
+                 break  # ignore the last 24 examples, for which the embeddings are missing
+             id, text, title = line.strip().split("\t")
+             text = text[1:-1]  # remove the enclosing " quotes
+             text = text.replace('""', '"')  # un-escape doubled quotes inside the passage
+             if self.config.with_embeddings:
+                 if vec_idx >= len(vecs):
+                     if len(vectors_files) == 0:
+                         logging.warning("Ran out of vector files at index {}".format(i))
+                         break
+                     vecs = np.load(open(vectors_files.pop(0), "rb"), allow_pickle=True)
+                     vec_idx = 0
+                 vec_id, vec = vecs[vec_idx]
+                 assert int(id) == int(vec_id), "ID mismatch between line {} and vector {}".format(id, vec_id)
+                 yield id, {"id": id, "text": text, "title": title, "embeddings": vec}
+                 vec_idx += 1
+             else:
+                 yield id, {
+                     "id": id,
+                     "text": text,
+                     "title": title,
+                 }
+ 
+     def _post_processing_resources(self, split):
+         if self.config.with_index:
+             return {"embeddings_index": self.config.index_file.format(split=split)}
+         else:
+             return {}
+ 
+     def _download_post_processing_resources(self, split, resource_name, dl_manager):
+         if resource_name == "embeddings_index":
+             try:
+                 downloaded_resources = dl_manager.download_and_extract(
+                     {"embeddings_index": os.path.join(_INDEX_URL, self.config.index_file.format(split=split))}
+                 )
+                 return downloaded_resources["embeddings_index"]
+             except ConnectionError:  # index doesn't exist
+                 pass
+ 
+     def _post_process(self, dataset, resources_paths):
+         if self.config.with_index:
+             index_file = resources_paths["embeddings_index"]
+             if os.path.exists(index_file):
+                 dataset.load_faiss_index("embeddings", index_file)
+             else:
+                 if "embeddings" not in dataset.column_names:
+                     raise ValueError("Couldn't build the index because there are no embeddings.")
+                 import faiss
+ 
+                 train_size = self.config.index_train_size
+                 logging.info("Building wiki_dpr faiss index")
+                 if self.config.index_name == "exact":
+                     d = 768
+                     index = faiss.IndexHNSWFlat(d, 512, faiss.METRIC_INNER_PRODUCT)
+                     dataset.add_faiss_index("embeddings", custom_index=index)
+                 else:
+                     d = 768
+                     quantizer = faiss.IndexHNSWFlat(d, 32, faiss.METRIC_INNER_PRODUCT)
+                     ivf_index = faiss.IndexIVFPQ(quantizer, d, 4096, 64, 8, faiss.METRIC_INNER_PRODUCT)
+                     ivf_index.own_fields = True
+                     quantizer.this.disown()
+                     dataset.add_faiss_index(
+                         "embeddings",
+                         train_size=train_size,
+                         custom_index=ivf_index,
+                     )
+                 logging.info("Saving wiki_dpr faiss index")
+                 dataset.save_faiss_index("embeddings", index_file)
+         return dataset
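
Once `_post_process` has loaded or built the faiss index on the `embeddings` column, retrieval against it typically looks like the sketch below. The DPR question-encoder checkpoint, the query, and `k` are illustrative assumptions; only `get_nearest_examples` and the `embeddings` index name come from the script above.

```python
# Retrieval sketch (not part of this commit): query the faiss index attached by _post_process.
# Assumes datasets, faiss, torch and transformers are installed.
import torch
from datasets import load_dataset
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer

# index_name="exact" matches the IndexHNSWFlat branch of _post_process.
wiki = load_dataset("wiki_dpr", with_embeddings=True, with_index=True, index_name="exact", split="train")

model_name = "facebook/dpr-question_encoder-single-nq-base"  # assumed checkpoint for illustration
tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(model_name)
encoder = DPRQuestionEncoder.from_pretrained(model_name)

question = "Who wrote the opera Tosca?"
with torch.no_grad():
    question_emb = encoder(**tokenizer(question, return_tensors="pt")).pooler_output[0].numpy()

# Top-k passages by inner product over the 768-dimensional DPR embeddings.
scores, retrieved = wiki.get_nearest_examples("embeddings", question_emb, k=5)
for score, title in zip(scores, retrieved["title"]):
    print(f"{score:.2f}\t{title}")
```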