# A Hugging Face dataset loading script that builds the Wikipedias of the EU languages by deferring to olm/wikipedia

# https://huggingface.co/datasets/olm/wikipedia/blob/main/wikipedia.py
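#
# Minimal usage sketch (an assumption, not part of this script): save this file
# locally, e.g. as eu_wikipedia.py, and point datasets.load_dataset at it. The
# local file name and the printed fields are illustrative; the `date` keyword is
# forwarded to WikipediaConfig below.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("eu_wikipedia.py", date="20221101", split="train")
#     print(ds[0]["title"], ds[0]["url"])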

import datasets

logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@ONLINE {wikidump,
    author = {Wikimedia Foundation},
    title  = {Wikimedia Downloads},
    url    = {https://dumps.wikimedia.org}
}
"""

_DESCRIPTION = """\
Wikipedia dataset containing cleaned articles of all languages.
The datasets are built from the Wikipedia dump
(https://dumps.wikimedia.org/) with one split per language. Each example
contains the content of one full Wikipedia article with cleaning to strip
markdown and unwanted sections (references, etc.).
"""

_LICENSE = (
    "This work is licensed under the Creative Commons Attribution-ShareAlike "
    "3.0 Unported License. To view a copy of this license, visit "
    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)

_VERSION = datasets.Version("1.0.0", "")


eu_languages = [
    "bg",
    "cs",
    # "da",
    # "de",
    # "el",
    # "en",
    # "es",
    # "et",
    # "fi",
    # "fr",
    # "ga",
    # "hr",
    # "hu",
    # "it",
    # "lt",
    # "lv",
    # "mt",
    # "nl",
    # "pl",
    # "pt",
    # "ro",
    # "sk",
    # "sl",
    # "sv",
]

class WikipediaConfig(datasets.BuilderConfig):
    """BuilderConfig for EuWikipedia."""

    def __init__(self, date=None, version=_VERSION, **kwargs):
        """BuilderConfig for Wikipedia.
        Args:
          date: string, date of the Wikipedia dump in YYYYMMDD format. A list of
            available dates can be found at https://dumps.wikimedia.org/enwiki/.
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            name=f"{date}",
            description=f"Wikipedia dataset for EU languages, parsed from {date} dump.",
            version=version,
            **kwargs,
        )
        self.date = date


_DATE = "20221101"


class EuWikipedia(datasets.GeneratorBasedBuilder):
    """Wikipedia dataset."""

    # Downloading and parsing of the dumps is delegated to the olm/wikipedia script.
    BUILDER_CONFIG_CLASS = WikipediaConfig
    BUILDER_CONFIGS = [WikipediaConfig(date=_DATE)]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            # No default supervised_keys.
            supervised_keys=None,
            homepage="https://dumps.wikimedia.org",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"date": self.config.date}
            )
        ]

    def _generate_examples(self, date):
        # Defer downloading and parsing of each language dump to olm/wikipedia.
        for lang in eu_languages:
            wiki = datasets.load_dataset(
                "olm/wikipedia", language=lang, date=date, split="train"
            )
            for example in wiki:
                # Yield (key, example) pairs as required by GeneratorBasedBuilder;
                # prefix the key with the language code so ids stay unique across languages.
                yield f"{lang}-{example['id']}", example
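

# Hedged smoke test, not part of the original script: building the dataset
# downloads and parses the full dumps for every enabled language, so this is
# slow and assumes network access.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, date=_DATE, split="train")
    print(len(ds), ds[0]["title"])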