Update files from the datasets library (from 1.4.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.4.0
- README.md +21 -21
- wikipedia.py +5 -3
README.md (CHANGED)
````diff
@@ -27,7 +27,7 @@
 - [Citation Information](#citation-information)
 - [Contributions](#contributions)
 
-##
+## Dataset Description
 
 - **Homepage:** [https://dumps.wikimedia.org](https://dumps.wikimedia.org)
 - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
@@ -37,7 +37,7 @@
 - **Size of the generated dataset:** 35376.35 MB
 - **Total amount of disk used:** 66115.60 MB
 
-###
+### Dataset Summary
 
 Wikipedia dataset containing cleaned articles of all languages.
 The datasets are built from the Wikipedia dump
@@ -45,19 +45,19 @@ The datasets are built from the Wikipedia dump
 contains the content of one full Wikipedia article with cleaning to strip
 markdown and unwanted sections (references, etc.).
 
-###
+### Supported Tasks
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Languages
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-##
+## Dataset Structure
 
 We show detailed information for up to 5 configurations of the dataset.
 
-###
+### Data Instances
 
 #### 20200501.de
 
@@ -114,7 +114,7 @@ An example of 'train' looks as follows.
 
 ```
 
-###
+### Data Fields
 
 The data fields are the same among all splits.
 
@@ -138,7 +138,7 @@ The data fields are the same among all splits.
 - `title`: a `string` feature.
 - `text`: a `string` feature.
 
-###
+### Data Splits Sample Size
 
 | name | train |
 |------------|------:|
@@ -148,49 +148,49 @@ The data fields are the same among all splits.
 |20200501.frr| 11803|
 |20200501.it |1931197|
 
-##
+## Dataset Creation
 
-###
+### Curation Rationale
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Source Data
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Annotations
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Personal and Sensitive Information
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-##
+## Considerations for Using the Data
 
-###
+### Social Impact of Dataset
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Discussion of Biases
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Other Known Limitations
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-##
+## Additional Information
 
-###
+### Dataset Curators
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Licensing Information
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Citation Information
 
 ```
 @ONLINE {wikidump,
````
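The card sections above document `title` and `text` string features and a per-configuration train split table. As a quick illustration (not part of this commit), the sketch below loads the smallest configuration from that table with the 1.4.0-era `load_dataset` API; the Wikipedia script is a Beam-based builder (see the class in the wikipedia.py diff below), so a `beam_runner` is passed, and `"DirectRunner"` is just one convenient local choice.

```python
# Illustrative sketch only, not part of the diff: load the small 20200501.frr
# configuration listed in the card's split table and inspect the documented fields.
from datasets import load_dataset

# Building this dataset locally requires an Apache Beam runner; the local
# DirectRunner is sufficient for this small configuration.
wiki = load_dataset("wikipedia", "20200501.frr", beam_runner="DirectRunner")

train = wiki["train"]
print(train.num_rows)           # expected to match the card's 11803 for 20200501.frr
print(train[0]["title"])        # `title`: a `string` feature
print(train[0]["text"][:200])   # `text`: a `string` feature
```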
wikipedia.py (CHANGED)
```diff
@@ -20,7 +20,6 @@ from __future__ import absolute_import, division, print_function
 
 import codecs
 import json
-import logging
 import re
 import xml.etree.cElementTree as etree
 
@@ -29,6 +28,9 @@ import six
 import datasets
 
 
+logger = datasets.logging.get_logger(__name__)
+
+
 if six.PY3:
     import bz2  # pylint:disable=g-import-not-at-top
 else:
@@ -461,7 +463,7 @@ class Wikipedia(datasets.BeamBasedBuilder):
 
         def _extract_content(filepath):
             """Extracts article content from a single WikiMedia XML file."""
-
+            logger.info("generating examples from = %s", filepath)
             with beam.io.filesystems.FileSystems.open(filepath) as f:
                 f = bz2.BZ2File(filename=f)
                 if six.PY3:
@@ -506,7 +508,7 @@ class Wikipedia(datasets.BeamBasedBuilder):
                 text = _parse_and_clean_wikicode(raw_content, parser=mwparserfromhell)
             except (mwparserfromhell.parser.ParserError) as e:
                 beam.metrics.Metrics.counter(language, "parser-error").inc()
-
+                logger.error("mwparserfromhell ParseError: %s", e)
                 return
 
             if not text:
```
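The script change swaps the stdlib `import logging` for a module-level logger obtained from the library via `datasets.logging.get_logger(__name__)`, so the new `logger.info` and `logger.error` calls follow the datasets verbosity settings. Below is a minimal standalone sketch of the same pattern; the helper function and file path are made up for illustration, while `datasets.logging.get_logger` and `datasets.logging.set_verbosity_info` are the library helpers the pattern relies on.

```python
# Sketch of the logging pattern introduced above, not the actual wikipedia.py code.
import datasets

# Module-level logger, exactly as the updated loading script does it.
logger = datasets.logging.get_logger(__name__)


def extract_content(filepath):
    """Toy stand-in for wikipedia.py's _extract_content (hypothetical helper)."""
    logger.info("generating examples from = %s", filepath)
    try:
        with open(filepath, "rb") as f:
            return f.read()
    except OSError as e:
        # Mirrors the added error log on parser failures in the real script.
        logger.error("failed to read %s: %s", filepath, e)
        return None


if __name__ == "__main__":
    # Raise the library's verbosity so the info message above becomes visible.
    datasets.logging.set_verbosity_info()
    extract_content("dump.xml.bz2")  # made-up path for illustration
```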