|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os |
|
import re |
|
import warnings |
|
|
|
import datasets |
|
import requests |
|
|
|
_DESCRIPTION = """\ |
|
CCMatrix: Mining Billions of High-Quality Parallel Sentences on the WEB |
|
|
|
We show that margin-based bitext mining in LASER's multilingual sentence space can be applied to |
|
monolingual corpora of billions of sentences to produce high quality aligned translation data. |
|
We use thirty-two snapshots of a curated common crawl corpus [1] totaling 69 billion unique sentences. |
|
Using one unified approach for 80 languages, we were able to mine 10.8 billion parallel sentences, |
|
out of which only 2.9 billion are aligned with English. |
|
|
|
IMPORTANT: Please cite reference [2][3] if you use this data. |
|
|
|
[1] Guillaume Wenzek, Marie-Anne Lachaux, Alexis Conneau, Vishrav Chaudhary, Francisco Guzmán, Armand Jouli |
|
and Edouard Grave, CCNet: Extracting High Quality Monolingual Datasets from Web Crawl Data |
|
|
|
[2] Holger Schwenk, Guillaume Wenzek, Sergey Edunov, Edouard Grave and Armand Joulin, |
|
CCMatrix: Mining Billions of High-Quality Parallel Sentences on the WEB |
|
|
|
[3] Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, |
|
Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, |
|
Sergey Edunov, Edouard Grave, Michael Auli, and Armand Joulin. |
|
Beyond English-Centric Multilingual Machine Translation |
|
|
|
90 languages, 1,197 bitexts |
|
total number of files: 90 |
|
total number of tokens: 112.14G |
|
total number of sentence fragments: 7.37G |
|
""" |
|
# Dataset homepage reported in DatasetInfo.
_HOMEPAGE_URL = "https://opus.nlpl.eu/CCMatrix.php"
|
_CITATION = """\ |
|
Guillaume Wenzek, Marie-Anne Lachaux, Alexis Conneau, Vishrav Chaudhary, Francisco Guzmán, Armand Jouli and Edouard Grave, CCNet: Extracting High Quality Monolingual Datasets from Web Crawl Data |
|
""" |
|
|
|
# Version of the OPUS CCMatrix release this script targets.
_VERSION = "1.0.0"
# File name inside the extracted zip: CCMatrix.<pair>.<lang or "scores">.
_FILE = "CCMatrix.{}.{}"
# Page scraped at import time for the list of available language pairs.
_URL = "https://opus.nlpl.eu/CCMatrix.php"
# Moses-format archive for one language pair; the pair must be in
# alphabetical order (see CCMatrixConfig.download_pair).
_DOWNLOAD_URL = "https://object.pouta.csc.fi/OPUS-CCMatrix/v1/moses/{}.txt.zip"
|
|
|
|
|
def get_language_pairs():
    """Scrape the OPUS CCMatrix page for the available language pairs.

    Falls back to a locally cached list (``language_pairs_cache`` module)
    when the page cannot be fetched. Each pair is emitted in both orders so
    callers can request either ``"a-b"`` or ``"b-a"``.

    Returns:
        list of (lang1, lang2) tuples, sorted.
    """
    try:
        # Without a timeout a stalled connection would hang module import
        # forever. raise_for_status() must also be inside the try: an HTTP
        # error page (404/500) returns normally from requests.get(), and
        # previously would have been silently parsed, yielding an empty
        # pair list instead of using the cached fallback. HTTPError is a
        # subclass of RequestException, so one except clause covers both.
        response = requests.get(_URL, timeout=30)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        warnings.warn(
            "Unable to download language pairs from '{}'. Using cached version".format(
                _URL
            )
        )
        from language_pairs_cache import language_pairs

        return language_pairs

    html = response.text

    # Links of interest look like "CCMatrix/v1/<l1>-<l2>_sample.html".
    ccmatrix_hrefs = [
        href
        for href in re.findall(r'href=[\'"]?([^\'" >]+)', html)
        if href.startswith("CCMatrix/")
    ]

    language_pairs = []
    for href in ccmatrix_hrefs:
        match = re.search(r"CCMatrix/v1/(\w+-\w+)_sample.html", href)
        if match:
            language1, language2 = match.group(1).split("-")
            # Register both directions; OPUS stores each pair only once.
            language_pairs.append((language1, language2))
            language_pairs.append((language2, language1))
    language_pairs.sort()
    return language_pairs
|
|
|
|
|
# Resolved once at import time; hits the network (or the local cache module).
_CONFIGS = get_language_pairs()
|
|
|
|
|
class CCMatrixConfig(datasets.BuilderConfig):
    """BuilderConfig for one CCMatrix direction, named ``"<lang1>-<lang2>"``."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        pair = kwargs["name"].split("-")
        self.lang1, self.lang2 = pair
        # OPUS hosts each pair under its alphabetically ordered name, so
        # both "en-fr" and "fr-en" configs download the same archive.
        self.download_pair = "-".join(sorted(pair))
|
|
|
|
|
class CCMatrix(datasets.GeneratorBasedBuilder):
    """Builder for the OPUS CCMatrix parallel corpus.

    One config per language-pair direction; downloads the Moses-format zip
    from OPUS and yields aligned sentence pairs with their mining scores.
    """

    # One config per direction scraped from the OPUS page at import time.
    BUILDER_CONFIGS = [
        CCMatrixConfig(
            name=f"{lang1}-{lang2}",
            description=f"Translating {lang1} to {lang2} or vice versa",
            version=datasets.Version(_VERSION),
        )
        for lang1, lang2 in _CONFIGS
    ]
    BUILDER_CONFIG_CLASS = CCMatrixConfig

    def __init__(self, *args, **kwargs):
        # Extra kwarg `max_train_samples` caps the train split. When a cap is
        # given without an explicit cache_dir, derive a cap-specific cache
        # directory so differently truncated datasets do not collide in the
        # default HF cache.
        if "max_train_samples" in kwargs and kwargs.get("cache_dir", None) is None:
            kwargs["cache_dir"] = os.path.join(
                str(datasets.config.HF_DATASETS_CACHE),
                "trainsamples_{}".format(kwargs["max_train_samples"]),
            )
        # 2**64 acts as "no limit" — far larger than any real corpus.
        self.max_samples = {
            "train": kwargs.get("max_train_samples", 2**64),
        }
        # Strip custom kwargs before delegating; the base class would reject
        # them. NOTE(review): "id_filter" is filtered out but never read in
        # this class — presumably consumed by a caller; confirm it is not dead.
        kwargs = {
            k: v
            for k, v in kwargs.items()
            if k not in ["max_train_samples", "id_filter"]
        }
        super().__init__(*args, **kwargs)

    def _info(self):
        """Describe the features: int id, float score, and a translation pair."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "score": datasets.Value("float"),
                    "translation": datasets.Translation(
                        languages=(self.config.lang1, self.config.lang2)
                    ),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the pair's zip; CCMatrix has only a train split."""
        # download_pair is the alphabetically ordered name OPUS hosts.
        download_url = _DOWNLOAD_URL.format(self.config.download_pair)
        path = dl_manager.download_and_extract(download_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datapath": path, "max_samples": self.max_samples["train"]},
            )
        ]

    def _generate_examples(self, datapath, max_samples):
        """Yield (key, example) pairs from the three line-aligned corpus files.

        The archive contains one file per language plus a scores file; line i
        of each file belongs to the same sentence pair. zip() stops at the
        shortest file, so a truncated member cannot misalign pairs.
        """
        l1_path = os.path.join(
            datapath, _FILE.format(self.config.download_pair, self.config.lang1)
        )
        l2_path = os.path.join(
            datapath, _FILE.format(self.config.download_pair, self.config.lang2)
        )
        scores_path = os.path.join(
            datapath, _FILE.format(self.config.download_pair, "scores")
        )
        with open(l1_path, encoding="utf-8") as f1, open(
            l2_path, encoding="utf-8"
        ) as f2, open(scores_path, encoding="utf-8") as f3:
            for sentence_counter, (x, y, score) in enumerate(zip(f1, f2, f3)):
                # Stop early when the configured cap is reached.
                if sentence_counter == max_samples:
                    return
                result = (
                    sentence_counter,
                    {
                        "id": sentence_counter,
                        "score": score,  # raw line incl. newline; presumably cast to float by the "score" feature — verify
                        "translation": {
                            self.config.lang1: x.strip(),
                            self.config.lang2: y.strip(),
                        },
                    },
                )
                yield result
|
|