# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import datasets


_DESCRIPTION = """\
This corpus is an attempt to recreate the dataset used for training XLM-R. This corpus comprises of monolingual data for 100+ languages and also includes data for romanized languages (indicated by *_rom). This was constructed using the urls and paragraph indices provided by the CC-Net repository by processing January-December 2018 Commoncrawl snapshots. Each file comprises of documents separated by double-newlines and paragraphs within the same document separated by a newline. The data is generated using the open source CC-Net repository. No claims of intellectual property are made on the work of preparation of the corpus.
"""
_HOMEPAGE_URL = "https://data.statmt.org/cc-100/"
_CITATION = """\
@inproceedings{conneau-etal-2020-unsupervised,
    title = "Unsupervised Cross-lingual Representation Learning at Scale",
    author = "Conneau, Alexis and
      Khandelwal, Kartikay and
      Goyal, Naman and
      Chaudhary, Vishrav and
      Wenzek, Guillaume and
      Guzm{\\'a}n, Francisco and
      Grave, Edouard and
      Ott, Myle and
      Zettlemoyer, Luke and
      Stoyanov, Veselin",
    editor = "Jurafsky, Dan and
      Chai, Joyce and
      Schluter, Natalie and
      Tetreault, Joel",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.acl-main.747",
    doi = "10.18653/v1/2020.acl-main.747",
    pages = "8440--8451",
}

@inproceedings{wenzek-etal-2020-ccnet,
    title = "{CCN}et: Extracting High Quality Monolingual Datasets from Web Crawl Data",
    author = "Wenzek, Guillaume and
      Lachaux, Marie-Anne and
      Conneau, Alexis and
      Chaudhary, Vishrav and
      Guzm{\\'a}n, Francisco and
      Joulin, Armand and
      Grave, Edouard",
    editor = "Calzolari, Nicoletta and
      B{\\'e}chet, Fr{\\'e}d{\\'e}ric and
      Blache, Philippe and
      Choukri, Khalid and
      Cieri, Christopher and
      Declerck, Thierry and
      Goggi, Sara and
      Isahara, Hitoshi and
      Maegaard, Bente and
      Mariani, Joseph and
      Mazo, H{\\'e}l{\\`e}ne and
      Moreno, Asuncion and
      Odijk, Jan and
      Piperidis, Stelios",
    booktitle = "Proceedings of the Twelfth Language Resources and Evaluation Conference",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2020.lrec-1.494",
    pages = "4003--4012",
    language = "English",
    ISBN = "979-10-95546-34-4",
}
"""
_VERSION = "1.0.0"
_BASE_URL = "https://data.statmt.org/cc-100/{}.txt.xz"
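# e.g. _BASE_URL.format("am") -> "https://data.statmt.org/cc-100/am.txt.xz"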
# Note: due to the size of the corpus, only a few languages are pre-listed here.
# You can, however, pass the `lang` parameter in the config to fetch data for any
# language in the corpus (see the usage sketch below).
_LANGUAGES = ["am", "sr", "ka"]
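
# Usage sketch (illustrative, not part of the loader; assumes this script is
# loadable under the name "cc100" and that the requested language file exists
# on the mirror):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("cc100", lang="am", split="train")
#     print(ds[0])  # {"id": "0", "text": "..."}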


class Cc100Config(datasets.BuilderConfig):
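    """BuilderConfig for CC-100; `lang` selects which language file to download."""
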
def __init__(self, *args, lang=None, **kwargs):
super().__init__(
*args,
name=f"{lang}",
**kwargs,
)
self.lang = lang


class Cc100(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
Cc100Config(
lang=lang,
description=f"Language: {lang}",
version=datasets.Version(_VERSION),
)
for lang in _LANGUAGES
]
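    # BUILDER_CONFIG_CLASS lets load_dataset(..., lang=...) build a config on the
    # fly for languages that are not pre-listed in BUILDER_CONFIGS above.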
BUILDER_CONFIG_CLASS = Cc100Config

    def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"text": datasets.Value("string"),
},
),
supervised_keys=None,
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
def _base_url(lang):
return _BASE_URL.format(lang)
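
        # Each language config maps to one compressed text file; download_and_extract
        # fetches it and returns the path to the decompressed local copy.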
download_url = _base_url(self.config.lang)
path = dl_manager.download_and_extract(download_url)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"datapath": path},
)
]

    def _generate_examples(self, datapath):
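        # Stream the file line by line: each line (including blank lines that
        # separate documents) becomes one example, keyed by its line index.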
with open(datapath, encoding="utf-8") as f:
for sentence_counter, row in enumerate(f):
result = (
sentence_counter,
{
"id": str(sentence_counter),
"text": row,
},
)
yield result