Datasets: wmt

system (HF staff) committed
Commit 961b324
1 Parent(s): b18dd75

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
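
For context, the script updated in this commit is consumed through the standard `datasets` entry point. A minimal sketch, assuming `datasets >= 1.16.0` (the release referenced above) and the `cs-en` configuration shown in this diff:

```python
# Minimal sketch: load the cs-en configuration built by this script.
# Assumes `pip install datasets` (>= 1.16.0, the release this commit tracks).
from datasets import load_dataset

dataset = load_dataset("wmt18", "cs-en")
# Each example is a translation pair, e.g.
# {"translation": {"cs": "...", "en": "..."}}
print(dataset["train"][0])
```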

Files changed (24)
  1. README.md +7 -0
  2. dataset_infos.json +1 -1
  3. dummy/cs-en/1.0.0/dummy_data.zip +2 -2
  4. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2013.cs +0 -1
  5. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2013.en +0 -1
  6. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2014-csen-ref.en.sgm +0 -1
  7. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2014-csen-src.cs.sgm +0 -1
  8. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2015-csen-ref.en.sgm +0 -1
  9. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2015-csen-src.cs.sgm +0 -1
  10. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2016-csen-ref.en.sgm +0 -1
  11. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2016-csen-src.cs.sgm +0 -1
  12. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2017-csen-ref.en.sgm +0 -1
  13. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2017-csen-src.cs.sgm +0 -1
  14. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2018-csen-ref.en.sgm +0 -1
  15. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2018-csen-src.cs.sgm +0 -1
  16. dummy/cs-en/1.0.0/dummy_data/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz/paracrawl-release1.en-cs.zipporah0-dedup-clean.cs +0 -1
  17. dummy/cs-en/1.0.0/dummy_data/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz/paracrawl-release1.en-cs.zipporah0-dedup-clean.en +0 -1
  18. dummy/cs-en/1.0.0/dummy_data/training-parallel-commoncrawl.tgz/commoncrawl.cs-en.cs +0 -1
  19. dummy/cs-en/1.0.0/dummy_data/training-parallel-commoncrawl.tgz/commoncrawl.cs-en.en +0 -1
  20. dummy/cs-en/1.0.0/dummy_data/training-parallel-europarl-v7.tgz/training/europarl-v7.cs-en.cs +0 -1
  21. dummy/cs-en/1.0.0/dummy_data/training-parallel-europarl-v7.tgz/training/europarl-v7.cs-en.en +0 -1
  22. dummy/cs-en/1.0.0/dummy_data/training-parallel-nc-v13.tgz/training-parallel-nc-v13/news-commentary-v13.cs-en.cs +0 -1
  23. dummy/cs-en/1.0.0/dummy_data/training-parallel-nc-v13.tgz/training-parallel-nc-v13/news-commentary-v13.cs-en.en +0 -1
  24. wmt_utils.py +111 -103
README.md CHANGED
@@ -1,5 +1,12 @@
  ---
+ pretty_name: WMT18
  paperswithcode_id: wmt-2018
+ multilinguality:
+ - translation
+ task_categories:
+ - conditional-text-generation
+ task_ids:
+ - machine-translation
  ---
 
  # Dataset Card for "wmt18"
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"cs-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "@InProceedings{bojar-EtAl:2018:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Federmann, Christian and Fishel, Mark\n and Graham, Yvette and Haddow, Barry and Huck, Matthias and\n Koehn, Philipp and Monz, Christof},\n title = {Findings of the 2018 Conference on Machine Translation (WMT18)},\n booktitle = {Proceedings of the Third Conference on Machine Translation,\n Volume 2: Shared Task Papers},\n month = {October},\n year = {2018},\n address = {Belgium, Brussels},\n publisher = {Association for Computational Linguistics},\n pages = {272--307},\n url = {http://www.aclweb.org/anthology/W18-6401}\n}\n", "homepage": "http://www.statmt.org/wmt18/translation-task.html", "license": "", "features": {"translation": {"languages": ["cs", "en"], "id": null, "_type": "Translation"}}, "supervised_keys": {"input": "cs", "output": "en"}, "builder_name": "wmt18", "config_name": "cs-en", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 696229, "num_examples": 2983, "dataset_name": "wmt18"}, "train": {"name": "train", "num_bytes": 1461020779, "num_examples": 11046024, "dataset_name": "wmt18"}, "validation": {"name": "validation", "num_bytes": 674430, "num_examples": 3005, "dataset_name": "wmt18"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-europarl-v7.tgz": {"num_bytes": 657632379, "checksum": "0224c7c710c8a063dfd893b0cc0830202d61f4c75c17eb8e31836103d27d96e7"}, "https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz": {"num_bytes": 299052360, "checksum": "221f88bac9f48ed6ef94bad5490890066f508be00e8f102cf19edf2a1413c350"}, "https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-commoncrawl.tgz": {"num_bytes": 918311367, "checksum": "c7a74e2ea01ac6c920123108627e35278d4ccb5701e15428ffa34de86fa3a9e5"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/translation-task/training-parallel-nc-v13.tgz": {"num_bytes": 113157482, "checksum": "17992b7e919cfb754c60f4e754148bc23b80706ad0ed7b34150831a554b40c91"}, "http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip": {"num_bytes": 2544381, "checksum": "e66466e00aecd392daaf547275590a9264bbc6aed70118c5c7cfd6946daf24ac"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz": {"num_bytes": 38654961, "checksum": "7a7deccf82ebb05ba508dba5eb21356492224e8f630ec4f992132b029b4b25e7"}}, "download_size": 2029352930, "dataset_size": 1462391438, "size_in_bytes": 3491744368}}
 
+ {"cs-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "@InProceedings{bojar-EtAl:2018:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Federmann, Christian and Fishel, Mark\n and Graham, Yvette and Haddow, Barry and Huck, Matthias and\n Koehn, Philipp and Monz, Christof},\n title = {Findings of the 2018 Conference on Machine Translation (WMT18)},\n booktitle = {Proceedings of the Third Conference on Machine Translation,\n Volume 2: Shared Task Papers},\n month = {October},\n year = {2018},\n address = {Belgium, Brussels},\n publisher = {Association for Computational Linguistics},\n pages = {272--307},\n url = {http://www.aclweb.org/anthology/W18-6401}\n}\n", "homepage": "http://www.statmt.org/wmt18/translation-task.html", "license": "", "features": {"translation": {"languages": ["cs", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "cs", "output": "en"}, "task_templates": null, "builder_name": "wmt18", "config_name": "cs-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1461016186, "num_examples": 11046024, "dataset_name": "wmt18"}, "validation": {"name": "validation", "num_bytes": 674430, "num_examples": 3005, "dataset_name": "wmt18"}, "test": {"name": "test", "num_bytes": 696229, "num_examples": 2983, "dataset_name": "wmt18"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip": {"num_bytes": 658092427, "checksum": "5b2d8b32c2396da739b4e731871c597fcc6e75729becd74619d0712eecf7770e"}, "https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz": {"num_bytes": 299052360, "checksum": "221f88bac9f48ed6ef94bad5490890066f508be00e8f102cf19edf2a1413c350"}, "https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip": {"num_bytes": 918734483, "checksum": "5ffe980072ea29adfd84568d099bea366d9f72772b988e670794ae851b4e5627"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip": {"num_bytes": 113221161, "checksum": "feff2c0315f66f94a9373bffa419f5664e16dc1e05298f0e37b2869ce4604b70"}, "http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip": {"num_bytes": 2544381, "checksum": "e66466e00aecd392daaf547275590a9264bbc6aed70118c5c7cfd6946daf24ac"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 2030359086, "post_processing_size": null, "dataset_size": 1462386845, "size_in_bytes": 3492745931}, "de-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "@InProceedings{bojar-EtAl:2018:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Federmann, Christian and Fishel, Mark\n and Graham, Yvette and Haddow, Barry and Huck, Matthias and\n Koehn, Philipp and Monz, Christof},\n title = {Findings of the 2018 Conference on Machine Translation (WMT18)},\n booktitle = {Proceedings of the Third Conference on Machine Translation,\n Volume 2: Shared Task Papers},\n month = {October},\n year = {2018},\n address = {Belgium, Brussels},\n publisher = {Association for Computational Linguistics},\n pages = {272--307},\n url = {http://www.aclweb.org/anthology/W18-6401}\n}\n", "homepage": "http://www.statmt.org/wmt18/translation-task.html", "license": "", "features": {"translation": {"languages": ["de", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "de", "output": "en"}, "task_templates": null, "builder_name": "wmt18", "config_name": "de-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8187552108, "num_examples": 42271874, "dataset_name": "wmt18"}, "validation": {"name": "validation", "num_bytes": 729519, "num_examples": 3004, "dataset_name": "wmt18"}, "test": {"name": "test", "num_bytes": 757649, "num_examples": 2998, "dataset_name": "wmt18"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip": {"num_bytes": 658092427, "checksum": "5b2d8b32c2396da739b4e731871c597fcc6e75729becd74619d0712eecf7770e"}, "https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-de.zipporah0-dedup-clean.tgz": {"num_bytes": 1918708277, "checksum": "435ce65e26ed2d44dd0d627f0b558d25bfe31d9ccb35caef050938745c23ea8c"}, "https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip": {"num_bytes": 918734483, "checksum": "5ffe980072ea29adfd84568d099bea366d9f72772b988e670794ae851b4e5627"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip": {"num_bytes": 113221161, "checksum": "feff2c0315f66f94a9373bffa419f5664e16dc1e05298f0e37b2869ce4604b70"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip": {"num_bytes": 161141713, "checksum": "93217093c624d9e16023fee98afb089208cca5937c2c08ee7edc707196d09a28"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 3808612335, "post_processing_size": null, "dataset_size": 8189039276, "size_in_bytes": 11997651611}, "et-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "@InProceedings{bojar-EtAl:2018:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Federmann, Christian and Fishel, Mark\n and Graham, Yvette and Haddow, Barry and Huck, Matthias and\n Koehn, Philipp and Monz, Christof},\n title = {Findings of the 2018 Conference on Machine Translation (WMT18)},\n booktitle = {Proceedings of the Third Conference on Machine Translation,\n Volume 2: Shared Task Papers},\n month = {October},\n year = {2018},\n address = {Belgium, Brussels},\n publisher = {Association for Computational Linguistics},\n pages = {272--307},\n url = {http://www.aclweb.org/anthology/W18-6401}\n}\n", "homepage": "http://www.statmt.org/wmt18/translation-task.html", "license": "", "features": {"translation": {"languages": ["et", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "et", "output": "en"}, "task_templates": null, "builder_name": "wmt18", "config_name": "et-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 647992667, "num_examples": 2175873, "dataset_name": "wmt18"}, "validation": {"name": "validation", "num_bytes": 459398, "num_examples": 2000, "dataset_name": "wmt18"}, "test": {"name": "test", "num_bytes": 489394, "num_examples": 2000, "dataset_name": "wmt18"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-ep-v8.zip": {"num_bytes": 246395103, "checksum": "ee36fc5dc5767d6fc661dc4b0c0acde293f45095ca74ba1af411b23b351271c9"}, "https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-et.zipporah0-dedup-clean.tgz": {"num_bytes": 78283314, "checksum": "1c3065a8e04a5a6d09d5d5c72ece8aeabcc418eb48cf85038bba6cdef638dc7d"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip": {"num_bytes": 161141713, "checksum": "93217093c624d9e16023fee98afb089208cca5937c2c08ee7edc707196d09a28"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 524534404, "post_processing_size": null, "dataset_size": 648941459, "size_in_bytes": 1173475863}, "fi-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "@InProceedings{bojar-EtAl:2018:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Federmann, Christian and Fishel, Mark\n and Graham, Yvette and Haddow, Barry and Huck, Matthias and\n Koehn, Philipp and Monz, Christof},\n title = {Findings of the 2018 Conference on Machine Translation (WMT18)},\n booktitle = {Proceedings of the Third Conference on Machine Translation,\n Volume 2: Shared Task Papers},\n month = {October},\n year = {2018},\n address = {Belgium, Brussels},\n publisher = {Association for Computational Linguistics},\n pages = {272--307},\n url = {http://www.aclweb.org/anthology/W18-6401}\n}\n", "homepage": "http://www.statmt.org/wmt18/translation-task.html", "license": "", "features": {"translation": {"languages": ["fi", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "fi", "output": "en"}, "task_templates": null, "builder_name": "wmt18", "config_name": "fi-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 857171881, "num_examples": 3280600, "dataset_name": "wmt18"}, "validation": {"name": "validation", "num_bytes": 1388828, "num_examples": 6004, "dataset_name": "wmt18"}, "test": {"name": "test", "num_bytes": 691841, "num_examples": 3000, "dataset_name": "wmt18"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-ep-v8.zip": {"num_bytes": 246395103, "checksum": "ee36fc5dc5767d6fc661dc4b0c0acde293f45095ca74ba1af411b23b351271c9"}, "https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-fi.zipporah0-dedup-clean.tgz": {"num_bytes": 36138086, "checksum": "ce3b46e928d37ae02ab8ce7e0ae0e1f89d1aed5e60271056913e35ff742e65ff"}, "https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip": {"num_bytes": 9485604, "checksum": "b3134566261b39d830eed345df1be1864039339cfeccf24b1bf86398c9e4a87c"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip": {"num_bytes": 161141713, "checksum": "93217093c624d9e16023fee98afb089208cca5937c2c08ee7edc707196d09a28"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 491874780, "post_processing_size": null, "dataset_size": 859252550, "size_in_bytes": 1351127330}, "kk-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "@InProceedings{bojar-EtAl:2018:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Federmann, Christian and Fishel, Mark\n and Graham, Yvette and Haddow, Barry and Huck, Matthias and\n Koehn, Philipp and Monz, Christof},\n title = {Findings of the 2018 Conference on Machine Translation (WMT18)},\n booktitle = {Proceedings of the Third Conference on Machine Translation,\n Volume 2: Shared Task Papers},\n month = {October},\n year = {2018},\n address = {Belgium, Brussels},\n publisher = {Association for Computational Linguistics},\n pages = {272--307},\n url = {http://www.aclweb.org/anthology/W18-6401}\n}\n", "homepage": "http://www.statmt.org/wmt18/translation-task.html", "license": "", "features": {"translation": {"languages": ["kk", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "kk", "output": "en"}, "task_templates": null, "builder_name": "wmt18", "config_name": "kk-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 0, "num_examples": 0, "dataset_name": "wmt18"}, "validation": {"name": "validation", "num_bytes": 0, "num_examples": 0, "dataset_name": "wmt18"}, "test": {"name": "test", "num_bytes": 0, "num_examples": 0, "dataset_name": "wmt18"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 0, "size_in_bytes": 0}, "ru-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "@InProceedings{bojar-EtAl:2018:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Federmann, Christian and Fishel, Mark\n and Graham, Yvette and Haddow, Barry and Huck, Matthias and\n Koehn, Philipp and Monz, Christof},\n title = {Findings of the 2018 Conference on Machine Translation (WMT18)},\n booktitle = {Proceedings of the Third Conference on Machine Translation,\n Volume 2: Shared Task Papers},\n month = {October},\n year = {2018},\n address = {Belgium, Brussels},\n publisher = {Association for Computational Linguistics},\n pages = {272--307},\n url = {http://www.aclweb.org/anthology/W18-6401}\n}\n", "homepage": "http://www.statmt.org/wmt18/translation-task.html", "license": "", "features": {"translation": {"languages": ["ru", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "ru", "output": "en"}, "task_templates": null, "builder_name": "wmt18", "config_name": "ru-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13665367647, "num_examples": 36858512, "dataset_name": "wmt18"}, "validation": {"name": "validation", "num_bytes": 1040195, "num_examples": 3001, "dataset_name": "wmt18"}, "test": {"name": "test", "num_bytes": 1085596, "num_examples": 3000, "dataset_name": "wmt18"}}, "download_checksums": {"https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz": {"num_bytes": 667981874, "checksum": "d4902407ef462034e88fbf5d8712a11c4b32a6e0e82d3a1b4f42a6f33d94f3c0"}, "https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip": {"num_bytes": 918734483, "checksum": "5ffe980072ea29adfd84568d099bea366d9f72772b988e670794ae851b4e5627"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip": {"num_bytes": 113221161, "checksum": "feff2c0315f66f94a9373bffa419f5664e16dc1e05298f0e37b2869ce4604b70"}, "https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip": {"num_bytes": 9485604, "checksum": "b3134566261b39d830eed345df1be1864039339cfeccf24b1bf86398c9e4a87c"}, "https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-ru.zip": {"num_bytes": 2447006960, "checksum": "72c2670fa6aadb36d541cba91cd26b9da291a976bf1a2748177a57baf8261f4c"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 4195144356, "post_processing_size": null, "dataset_size": 13667493438, "size_in_bytes": 17862637794}, "tr-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "@InProceedings{bojar-EtAl:2018:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Federmann, Christian and Fishel, Mark\n and Graham, Yvette and Haddow, Barry and Huck, Matthias and\n Koehn, Philipp and Monz, Christof},\n title = {Findings of the 2018 Conference on Machine Translation (WMT18)},\n booktitle = {Proceedings of the Third Conference on Machine Translation,\n Volume 2: Shared Task Papers},\n month = {October},\n year = {2018},\n address = {Belgium, Brussels},\n publisher = {Association for Computational Linguistics},\n pages = {272--307},\n url = {http://www.aclweb.org/anthology/W18-6401}\n}\n", "homepage": "http://www.statmt.org/wmt18/translation-task.html", "license": "", "features": {"translation": {"languages": ["tr", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "tr", "output": "en"}, "task_templates": null, "builder_name": "wmt18", "config_name": "tr-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 60416617, "num_examples": 205756, "dataset_name": "wmt18"}, "validation": {"name": "validation", "num_bytes": 752773, "num_examples": 3007, "dataset_name": "wmt18"}, "test": {"name": "test", "num_bytes": 770313, "num_examples": 3000, "dataset_name": "wmt18"}}, "download_checksums": {"https://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-tr.tmx.gz": {"num_bytes": 23548787, "checksum": "23581212dc3267383198a92636219fceb3f23207bfc1d1e78ab60a2cb465eff8"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 62263061, "post_processing_size": null, "dataset_size": 61939703, "size_in_bytes": 124202764}, "zh-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "@InProceedings{bojar-EtAl:2018:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Federmann, Christian and Fishel, Mark\n and Graham, Yvette and Haddow, Barry and Huck, Matthias and\n Koehn, Philipp and Monz, Christof},\n title = {Findings of the 2018 Conference on Machine Translation (WMT18)},\n booktitle = {Proceedings of the Third Conference on Machine Translation,\n Volume 2: Shared Task Papers},\n month = {October},\n year = {2018},\n address = {Belgium, Brussels},\n publisher = {Association for Computational Linguistics},\n pages = {272--307},\n url = {http://www.aclweb.org/anthology/W18-6401}\n}\n", "homepage": "http://www.statmt.org/wmt18/translation-task.html", "license": "", "features": {"translation": {"languages": ["zh", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "zh", "output": "en"}, "task_templates": null, "builder_name": "wmt18", "config_name": "zh-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5536169801, "num_examples": 25160346, "dataset_name": "wmt18"}, "validation": {"name": "validation", "num_bytes": 540347, "num_examples": 2001, "dataset_name": "wmt18"}, "test": {"name": "test", "num_bytes": 1107522, "num_examples": 3981, "dataset_name": "wmt18"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip": {"num_bytes": 113221161, "checksum": "feff2c0315f66f94a9373bffa419f5664e16dc1e05298f0e37b2869ce4604b70"}, "https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-zh.zip": {"num_bytes": 1385832125, "checksum": "97f5ce0892084cdbb2332b52ffcc0299a649ba0a43712d921575fe2b7edfb4b4"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/casia2015.zip": {"num_bytes": 98159063, "checksum": "c939f1528f96c419e9bbffb9caad869616a969e7704ffac896e245a02aff59a9"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/casict2011.zip": {"num_bytes": 166957775, "checksum": "606adc0ccc5d8fc7c47f8589991286616342a1a379a571ce3038918731ae0182"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/casict2015.zip": {"num_bytes": 106836569, "checksum": "eef8e25b297c1aff12ab24719247d3588e756d7a4e2c30d4d34fcb4d05ab1050"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/datum2015.zip": {"num_bytes": 100118018, "checksum": "654afce6731485c40ce856514ab80cd2bfd836126bcaf48cdb911ebc32b021a4"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/datum2017.zip": {"num_bytes": 99278067, "checksum": "737455c139596f4abf3b1da73bc38932b3ef9534549328eff47d867e29950ed2"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/neu2017.zip": {"num_bytes": 150311715, "checksum": "5c5ea9ac5cbc43c974bd53796a3a29829800865b6398b52cda0a3854cb0d2e03"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, 
"download_size": 2259428767, "post_processing_size": null, "dataset_size": 5537817670, "size_in_bytes": 7797246437}}
dummy/cs-en/1.0.0/dummy_data.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5c5db9447b0787be367f458cf41465fc0a797b4b80a0af268801292610958c30
- size 7514
+ oid sha256:f4031b929d6d0fafce25ec3ea748ad0c1f9a37a74121cf25884c83056205f5b1
+ size 9393
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2013.cs DELETED
@@ -1 +0,0 @@
- Just a test sentence.

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2013.en DELETED
@@ -1 +0,0 @@
- Just a test sentence.

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2014-csen-ref.en.sgm DELETED
@@ -1 +0,0 @@
- <seg id="1"> Test </seg>

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2014-csen-src.cs.sgm DELETED
@@ -1 +0,0 @@
- <seg id="1"> Test </seg>

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2015-csen-ref.en.sgm DELETED
@@ -1 +0,0 @@
- <seg id="1"> Test </seg>

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2015-csen-src.cs.sgm DELETED
@@ -1 +0,0 @@
- <seg id="1"> Test </seg>

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2016-csen-ref.en.sgm DELETED
@@ -1 +0,0 @@
- <seg id="1"> Test </seg>

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2016-csen-src.cs.sgm DELETED
@@ -1 +0,0 @@
- <seg id="1"> Test </seg>

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2017-csen-ref.en.sgm DELETED
@@ -1 +0,0 @@
- <seg id="1"> Test </seg>

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2017-csen-src.cs.sgm DELETED
@@ -1 +0,0 @@
- <seg id="1"> Test </seg>

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2018-csen-ref.en.sgm DELETED
@@ -1 +0,0 @@
- <seg id="1"> Test </seg>

dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2018-csen-src.cs.sgm DELETED
@@ -1 +0,0 @@
- <seg id="1"> Test </seg>

dummy/cs-en/1.0.0/dummy_data/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz/paracrawl-release1.en-cs.zipporah0-dedup-clean.cs DELETED
@@ -1 +0,0 @@
- This is a test sentence.

dummy/cs-en/1.0.0/dummy_data/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz/paracrawl-release1.en-cs.zipporah0-dedup-clean.en DELETED
@@ -1 +0,0 @@
- This is a test sentence.

dummy/cs-en/1.0.0/dummy_data/training-parallel-commoncrawl.tgz/commoncrawl.cs-en.cs DELETED
@@ -1 +0,0 @@
- This is a test sentence.

dummy/cs-en/1.0.0/dummy_data/training-parallel-commoncrawl.tgz/commoncrawl.cs-en.en DELETED
@@ -1 +0,0 @@
- This is a test sentence.

dummy/cs-en/1.0.0/dummy_data/training-parallel-europarl-v7.tgz/training/europarl-v7.cs-en.cs DELETED
@@ -1 +0,0 @@
- This is a test sentence to pass the tests.

dummy/cs-en/1.0.0/dummy_data/training-parallel-europarl-v7.tgz/training/europarl-v7.cs-en.en DELETED
@@ -1 +0,0 @@
- This is a test sentence

dummy/cs-en/1.0.0/dummy_data/training-parallel-nc-v13.tgz/training-parallel-nc-v13/news-commentary-v13.cs-en.cs DELETED
@@ -1 +0,0 @@
- This is a test sentence.

dummy/cs-en/1.0.0/dummy_data/training-parallel-nc-v13.tgz/training-parallel-nc-v13/news-commentary-v13.cs-en.en DELETED
@@ -1 +0,0 @@
- This is a test sentence.
wmt_utils.py CHANGED
@@ -96,7 +96,7 @@ class SubDataset:
96
  def _inject_language(self, src, strings):
97
  """Injects languages into (potentially) template strings."""
98
  if src not in self.sources:
99
- raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
100
 
101
  def _format_string(s):
102
  if "{0}" in s and "{1}" and "{src}" in s:
@@ -127,7 +127,7 @@ _TRAIN_SUBSETS = [
127
  name="commoncrawl",
128
  target="en", # fr-de pair in commoncrawl_frde
129
  sources={"cs", "de", "es", "fr", "ru"},
130
- url="https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-commoncrawl.tgz",
131
  path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
132
  ),
133
  SubDataset(
@@ -184,14 +184,14 @@ _TRAIN_SUBSETS = [
184
  name="dcep_v1",
185
  target="en",
186
  sources={"lv"},
187
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/dcep.lv-en.v1.tgz",
188
  path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
189
  ),
190
  SubDataset(
191
  name="europarl_v7",
192
  target="en",
193
  sources={"cs", "de", "es", "fr"},
194
- url="https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-europarl-v7.tgz",
195
  path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
196
  ),
197
  SubDataset(
@@ -208,14 +208,14 @@ _TRAIN_SUBSETS = [
208
  name="europarl_v8_18",
209
  target="en",
210
  sources={"et", "fi"},
211
- url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/translation-task/training-parallel-ep-v8.tgz",
212
  path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
213
  ),
214
  SubDataset(
215
  name="europarl_v8_16",
216
  target="en",
217
  sources={"fi", "ro"},
218
- url="https://huggingface.co/datasets/wmt/wmt16/resolve/main/translation-task/training-parallel-ep-v8.tgz",
219
  path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
220
  ),
221
  SubDataset(
@@ -229,7 +229,7 @@ _TRAIN_SUBSETS = [
229
  name="gigafren",
230
  target="en",
231
  sources={"fr"},
232
- url="https://huggingface.co/datasets/wmt/wmt10/resolve/main/training-giga-fren.tar",
233
  path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
234
  ),
235
  SubDataset(
@@ -244,35 +244,35 @@ _TRAIN_SUBSETS = [
244
  name="leta_v1",
245
  target="en",
246
  sources={"lv"},
247
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/leta.v1.tgz",
248
  path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
249
  ),
250
  SubDataset(
251
  name="multiun",
252
  target="en",
253
  sources={"es", "fr"},
254
- url="https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-un.tgz",
255
  path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
256
  ),
257
  SubDataset(
258
  name="newscommentary_v9",
259
  target="en",
260
  sources={"cs", "de", "fr", "ru"},
261
- url="https://huggingface.co/datasets/wmt/wmt14/resolve/main/training-parallel-nc-v9.tgz",
262
  path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
263
  ),
264
  SubDataset(
265
  name="newscommentary_v10",
266
  target="en",
267
  sources={"cs", "de", "fr", "ru"},
268
- url="https://huggingface.co/datasets/wmt/wmt15/resolve/main/training-parallel-nc-v10.tgz",
269
  path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
270
  ),
271
  SubDataset(
272
  name="newscommentary_v11",
273
  target="en",
274
  sources={"cs", "de", "ru"},
275
- url="https://huggingface.co/datasets/wmt/wmt16/resolve/main/translation-task/training-parallel-nc-v11.tgz",
276
  path=(
277
  "training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
278
  "training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
@@ -282,14 +282,14 @@ _TRAIN_SUBSETS = [
282
  name="newscommentary_v12",
283
  target="en",
284
  sources={"cs", "de", "ru", "zh"},
285
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/training-parallel-nc-v12.tgz",
286
  path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
287
  ),
288
  SubDataset(
289
  name="newscommentary_v13",
290
  target="en",
291
  sources={"cs", "de", "ru", "zh"},
292
- url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/translation-task/training-parallel-nc-v13.tgz",
293
  path=(
294
  "training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
295
  "training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
@@ -313,14 +313,14 @@ _TRAIN_SUBSETS = [
313
  name="onlinebooks_v1",
314
  target="en",
315
  sources={"lv"},
316
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/books.lv-en.v1.tgz",
317
  path=("farewell/farewell.lv", "farewell/farewell.en"),
318
  ),
319
  SubDataset(
320
  name="paracrawl_v1",
321
  target="en",
322
  sources={"cs", "de", "et", "fi", "ru"},
323
- url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
324
  path=(
325
  "paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
326
  "paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
@@ -330,7 +330,7 @@ _TRAIN_SUBSETS = [
330
  name="paracrawl_v1_ru",
331
  target="en",
332
  sources={"ru"},
333
- url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
334
  path=(
335
  "paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
336
  "paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
@@ -357,7 +357,7 @@ _TRAIN_SUBSETS = [
357
  name="rapid_2016",
358
  target="en",
359
  sources={"de", "et", "fi"},
360
- url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/translation-task/rapid2016.tgz",
361
  path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
362
  ),
363
  SubDataset(
@@ -385,21 +385,21 @@ _TRAIN_SUBSETS = [
385
  name="uncorpus_v1",
386
  target="en",
387
  sources={"ru", "zh"},
388
- url="https://huggingface.co/datasets/wmt/uncorpus/resolve/main/UNv1.0.en-{src}.tar.gz",
389
  path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
390
  ),
391
  SubDataset(
392
  name="wikiheadlines_fi",
393
  target="en",
394
  sources={"fi"},
395
- url="https://huggingface.co/datasets/wmt/wmt15/resolve/main/wiki-titles.tgz",
396
  path="wiki/fi-en/titles.fi-en",
397
  ),
398
  SubDataset(
399
  name="wikiheadlines_hi",
400
  target="en",
401
  sources={"hi"},
402
- url="https://huggingface.co/datasets/wmt/wmt14/resolve/main/wiki-titles.tgz",
403
  path="wiki/hi-en/wiki-titles.hi-en",
404
  ),
405
  SubDataset(
@@ -407,7 +407,7 @@ _TRAIN_SUBSETS = [
407
  name="wikiheadlines_ru",
408
  target="en",
409
  sources={"ru"},
410
- url="https://huggingface.co/datasets/wmt/wmt15/resolve/main/wiki-titles.tgz",
411
  path="wiki/ru-en/wiki.ru-en",
412
  ),
413
  SubDataset(
@@ -431,7 +431,7 @@ _TRAIN_SUBSETS = [
431
  name=ss,
432
  target="en",
433
  sources={"zh"},
434
- url="ftp://cwmt-wmt:cwmt-wmt@datasets.nju.edu.cn/parallel/%s.zip" % ss,
435
  path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
436
  )
437
  for ss in CWMT_SUBSET_NAMES
@@ -442,175 +442,175 @@ _DEV_SUBSETS = [
442
  name="euelections_dev2019",
443
  target="de",
444
  sources={"fr"},
445
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
446
  path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
447
  ),
448
  SubDataset(
449
  name="newsdev2014",
450
  target="en",
451
  sources={"hi"},
452
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
453
  path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
454
  ),
455
  SubDataset(
456
  name="newsdev2015",
457
  target="en",
458
  sources={"fi"},
459
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
460
  path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
461
  ),
462
  SubDataset(
463
  name="newsdiscussdev2015",
464
  target="en",
465
  sources={"ro", "tr"},
466
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
467
  path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
468
  ),
469
  SubDataset(
470
  name="newsdev2016",
471
  target="en",
472
  sources={"ro", "tr"},
473
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
474
  path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
475
  ),
476
  SubDataset(
477
  name="newsdev2017",
478
  target="en",
479
  sources={"lv", "zh"},
480
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
481
  path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
482
  ),
483
  SubDataset(
484
  name="newsdev2018",
485
  target="en",
486
  sources={"et"},
487
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
488
  path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
489
  ),
490
  SubDataset(
491
  name="newsdev2019",
492
  target="en",
493
  sources={"gu", "kk", "lt"},
494
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
495
  path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
496
  ),
497
  SubDataset(
498
  name="newsdiscussdev2015",
499
  target="en",
500
  sources={"fr"},
501
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
502
  path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
503
  ),
504
  SubDataset(
505
  name="newsdiscusstest2015",
506
  target="en",
507
  sources={"fr"},
508
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
509
  path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
510
  ),
511
  SubDataset(
512
  name="newssyscomb2009",
513
  target="en",
514
  sources={"cs", "de", "es", "fr"},
515
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
516
  path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
517
  ),
518
  SubDataset(
519
  name="newstest2008",
520
  target="en",
521
  sources={"cs", "de", "es", "fr", "hu"},
522
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
523
  path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
524
  ),
525
  SubDataset(
526
  name="newstest2009",
527
  target="en",
528
  sources={"cs", "de", "es", "fr"},
529
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
530
  path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
531
  ),
532
  SubDataset(
533
  name="newstest2010",
534
  target="en",
535
  sources={"cs", "de", "es", "fr"},
536
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
537
  path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
538
  ),
539
  SubDataset(
540
  name="newstest2011",
541
  target="en",
542
  sources={"cs", "de", "es", "fr"},
543
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
544
  path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
545
  ),
546
  SubDataset(
547
  name="newstest2012",
548
  target="en",
549
  sources={"cs", "de", "es", "fr", "ru"},
550
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
551
  path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
552
  ),
553
  SubDataset(
554
  name="newstest2013",
555
  target="en",
556
  sources={"cs", "de", "es", "fr", "ru"},
557
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
558
  path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
559
  ),
560
  SubDataset(
561
  name="newstest2014",
562
  target="en",
563
  sources={"cs", "de", "es", "fr", "hi", "ru"},
564
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
565
  path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
566
  ),
567
  SubDataset(
568
  name="newstest2015",
569
  target="en",
570
  sources={"cs", "de", "fi", "ru"},
571
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
572
  path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
573
  ),
574
  SubDataset(
575
  name="newsdiscusstest2015",
576
  target="en",
577
  sources={"fr"},
578
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
579
  path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
580
  ),
581
  SubDataset(
582
  name="newstest2016",
583
  target="en",
584
  sources={"cs", "de", "fi", "ro", "ru", "tr"},
585
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
586
  path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
587
  ),
588
  SubDataset(
589
  name="newstestB2016",
590
  target="en",
591
  sources={"fi"},
592
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
593
  path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
594
  ),
595
  SubDataset(
596
  name="newstest2017",
597
  target="en",
598
  sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
599
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
600
  path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
601
  ),
602
  SubDataset(
603
  name="newstestB2017",
604
  target="en",
605
  sources={"fi"},
606
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
607
  path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
608
  ),
609
  SubDataset(
610
  name="newstest2018",
611
  target="en",
612
  sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
613
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
614
  path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
615
  ),
616
  ]
@@ -658,9 +658,7 @@ class WmtConfig(datasets.BuilderConfig):
658
  # TODO(PVP): remove when manual dir works
659
  # +++++++++++++++++++++
660
  if language_pair[1] in ["cs", "hi", "ru"]:
661
- assert NotImplementedError(
662
- "The dataset for {}-en is currently not fully supported.".format(language_pair[1])
663
- )
664
  # +++++++++++++++++++++
665
 
666
 
@@ -730,7 +728,7 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
730
  if dataset.get_manual_dl_files(source):
731
  # TODO(PVP): following two lines skip configs that are incomplete for now
732
  # +++++++++++++++++++++
733
- logger.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
734
  continue
735
  # +++++++++++++++++++++
736
 
@@ -741,9 +739,7 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
741
  ]
742
  assert all(
743
  os.path.exists(path) for path in manual_paths
744
- ), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
745
- dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
746
- )
747
 
748
  # set manual path for correct subset
749
  manual_paths_dict[ss_name] = manual_paths
@@ -779,24 +775,36 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
779
  for ex_dir, rel_path in zip(extract_dirs, rel_paths)
780
  ]
781
 
 
 
 
 
 
 
 
782
  for ss_name in split_subsets:
783
  # TODO(PVP) remove following five lines when manual data works
784
  # +++++++++++++++++++++
785
  dataset = DATASET_MAP[ss_name]
786
  source, _ = self.config.language_pair
787
  if dataset.get_manual_dl_files(source):
788
- logger.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
789
  continue
790
  # +++++++++++++++++++++
791
 
792
  logger.info("Generating examples from: %s", ss_name)
 
793
  dataset = DATASET_MAP[ss_name]
794
  extract_dirs = extraction_map[ss_name]
795
  files = _get_local_paths(dataset, extract_dirs)
 
 
 
796
 
797
  if ss_name.startswith("czeng"):
798
  if ss_name.endswith("16pre"):
799
  sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
 
800
  elif ss_name.endswith("17"):
801
  filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
802
  sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
@@ -809,18 +817,21 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
809
  sub_generator = _parse_frde_bitext
810
  else:
811
  sub_generator = _parse_parallel_sentences
 
812
  elif len(files) == 1:
813
- fname = files[0]
814
  # Note: Due to formatting used by `download_manager`, the file
815
  # extension may not be at the end of the file path.
816
  if ".tsv" in fname:
817
  sub_generator = _parse_tsv
 
818
  elif (
819
  ss_name.startswith("newscommentary_v14")
820
  or ss_name.startswith("europarl_v9")
821
  or ss_name.startswith("wikititles_v1")
822
  ):
823
  sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
 
824
  elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
825
  sub_generator = _parse_tmx
826
  elif ss_name.startswith("wikiheadlines"):
@@ -830,28 +841,33 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
830
  else:
831
  raise ValueError("Invalid number of files: %d" % len(files))
832
 
833
- for sub_key, ex in sub_generator(*files):
834
  if not all(ex.values()):
835
  continue
836
  # TODO(adarob): Add subset feature.
837
  # ex["subset"] = subset
838
- key = "{}/{}".format(ss_name, sub_key)
839
  if with_translation is True:
840
  ex = {"translation": ex}
841
  yield key, ex
842
 
843
 
844
- def _parse_parallel_sentences(f1, f2):
845
  """Returns examples from parallel SGML or text files, which may be gzipped."""
846
 
847
- def _parse_text(path):
848
  """Returns the sentences from a single text file, which may be gzipped."""
849
- split_path = path.split(".")
850
 
851
  if split_path[-1] == "gz":
852
  lang = split_path[-2]
853
- with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
854
- return g.read().decode("utf-8").split("\n"), lang
 
 
 
 
 
855
 
856
  if split_path[-1] == "txt":
857
  # CWMT
@@ -859,25 +875,32 @@ def _parse_parallel_sentences(f1, f2):
859
  lang = "zh" if lang in ("ch", "cn") else lang
860
  else:
861
  lang = split_path[-1]
862
- with open(path, "rb") as f:
863
- return f.read().decode("utf-8").split("\n"), lang
864
 
865
- def _parse_sgm(path):
 
 
 
 
 
 
 
866
  """Returns sentences from a single SGML file."""
867
- lang = path.split(".")[-2]
868
- sentences = []
869
  # Note: We can't use the XML parser since some of the files are badly
870
  # formatted.
871
  seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
872
- with open(path, encoding="utf-8") as f:
873
- for line in f:
874
- seg_match = re.match(seg_re, line)
875
- if seg_match:
876
- assert len(seg_match.groups()) == 1
877
- sentences.append(seg_match.groups()[0])
878
- return sentences, lang
879
 
880
- parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
 
 
 
 
 
 
 
 
 
 
881
 
882
  # Some datasets (e.g., CWMT) contain multiple parallel files specified with
883
  # a wildcard. We sort both sets to align them and parse them one by one.
@@ -893,34 +916,19 @@ def _parse_parallel_sentences(f1, f2):
893
  )
894
 
895
  for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
896
- l1_sentences, l1 = parse_file(f1_i)
897
- l2_sentences, l2 = parse_file(f2_i)
898
-
899
- assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
900
- len(l1_sentences),
901
- len(l2_sentences),
902
- f1_i,
903
- f2_i,
904
- )
905
 
906
  for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
907
- key = "{}/{}".format(f_id, line_id)
908
  yield key, {l1: s1, l2: s2}
909
 
910
 
911
  def _parse_frde_bitext(fr_path, de_path):
912
- with open(fr_path, encoding="utf-8") as f:
913
- fr_sentences = f.read().split("\n")
914
- with open(de_path, encoding="utf-8") as f:
915
- de_sentences = f.read().split("\n")
916
- assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
917
- len(fr_sentences),
918
- len(de_sentences),
919
- fr_path,
920
- de_path,
921
- )
922
- for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
923
- yield line_id, {"fr": s1, "de": s2}
924
 
925
 
926
  def _parse_tmx(path):
@@ -946,11 +954,11 @@ def _parse_tmx(path):
946
  elem.clear()
947
 
948
 
949
- def _parse_tsv(path, language_pair=None):
950
  """Generates examples from TSV file."""
951
  if language_pair is None:
952
- lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
953
- assert lang_match is not None, "Invalid TSV filename: %s" % path
954
  l1, l2 = lang_match.groups()
955
  else:
956
  l1, l2 = language_pair
@@ -997,7 +1005,7 @@ def _parse_czeng(*paths, **kwargs):
997
  block_match = re.match(re_block, id_)
998
  if block_match and block_match.groups()[0] in bad_blocks:
999
  continue
1000
- sub_key = "{}/{}".format(filename, line_id)
1001
  yield sub_key, {
1002
  "cs": cs.strip(),
1003
  "en": en.strip(),
 
96
  def _inject_language(self, src, strings):
97
  """Injects languages into (potentially) template strings."""
98
  if src not in self.sources:
99
+ raise ValueError(f"Invalid source for '{self.name}': {src}")
100
 
101
  def _format_string(s):
102
  if "{0}" in s and "{1}" and "{src}" in s:
 
127
  name="commoncrawl",
128
  target="en", # fr-de pair in commoncrawl_frde
129
  sources={"cs", "de", "es", "fr", "ru"},
130
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip",
131
  path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
132
  ),
133
  SubDataset(
 
184
  name="dcep_v1",
185
  target="en",
186
  sources={"lv"},
187
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/dcep.lv-en.v1.zip",
188
  path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
189
  ),
190
  SubDataset(
191
  name="europarl_v7",
192
  target="en",
193
  sources={"cs", "de", "es", "fr"},
194
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip",
195
  path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
196
  ),
197
  SubDataset(
 
         name="europarl_v8_18",
         target="en",
         sources={"et", "fi"},
+        url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-ep-v8.zip",
         path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
     ),
     SubDataset(
         name="europarl_v8_16",
         target="en",
         sources={"fi", "ro"},
+        url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-ep-v8.zip",
         path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
     ),
     SubDataset(
 
         name="gigafren",
         target="en",
         sources={"fr"},
+        url="https://huggingface.co/datasets/wmt/wmt10/resolve/main-zip/training-giga-fren.zip",
         path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
     ),
     SubDataset(
 
         name="leta_v1",
         target="en",
         sources={"lv"},
+        url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/leta.v1.zip",
         path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
     ),
     SubDataset(
         name="multiun",
         target="en",
         sources={"es", "fr"},
+        url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-un.zip",
         path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
     ),
     SubDataset(
         name="newscommentary_v9",
         target="en",
         sources={"cs", "de", "fr", "ru"},
+        url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/training-parallel-nc-v9.zip",
         path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
     ),
     SubDataset(
         name="newscommentary_v10",
         target="en",
         sources={"cs", "de", "fr", "ru"},
+        url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/training-parallel-nc-v10.zip",
         path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
     ),
     SubDataset(
         name="newscommentary_v11",
         target="en",
         sources={"cs", "de", "ru"},
+        url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-nc-v11.zip",
         path=(
             "training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
             "training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
 
         name="newscommentary_v12",
         target="en",
         sources={"cs", "de", "ru", "zh"},
+        url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip",
         path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
     ),
     SubDataset(
         name="newscommentary_v13",
         target="en",
         sources={"cs", "de", "ru", "zh"},
+        url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip",
         path=(
             "training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
             "training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
 
         name="onlinebooks_v1",
         target="en",
         sources={"lv"},
+        url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/books.lv-en.v1.zip",
         path=("farewell/farewell.lv", "farewell/farewell.en"),
     ),
     SubDataset(
         name="paracrawl_v1",
         target="en",
         sources={"cs", "de", "et", "fi", "ru"},
+        url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",  # TODO(QL): use gzip for streaming
         path=(
             "paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
             "paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
 
         name="paracrawl_v1_ru",
         target="en",
         sources={"ru"},
+        url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",  # TODO(QL): use gzip for streaming
         path=(
             "paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
             "paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
 
         name="rapid_2016",
         target="en",
         sources={"de", "et", "fi"},
+        url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip",
         path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
     ),
     SubDataset(
 
         name="uncorpus_v1",
         target="en",
         sources={"ru", "zh"},
+        url="https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-{src}.zip",
         path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
     ),
     SubDataset(
         name="wikiheadlines_fi",
         target="en",
         sources={"fi"},
+        url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip",
         path="wiki/fi-en/titles.fi-en",
     ),
     SubDataset(
         name="wikiheadlines_hi",
         target="en",
         sources={"hi"},
+        url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/wiki-titles.zip",
         path="wiki/hi-en/wiki-titles.hi-en",
     ),
     SubDataset(
 
         name="wikiheadlines_ru",
         target="en",
         sources={"ru"},
+        url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip",
         path="wiki/ru-en/wiki.ru-en",
     ),
     SubDataset(
 
         name=ss,
         target="en",
         sources={"zh"},
+        url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/%s.zip" % ss,
         path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
     )
     for ss in CWMT_SUBSET_NAMES
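Each CWMT subset expands to its own `SubDataset` with a per-subset archive URL and glob paths that match both the `*_ch.txt` and `*_cn.txt` spellings of the Chinese side. A toy expansion of the comprehension above (the subset names here are illustrative only; the real list lives in `CWMT_SUBSET_NAMES` elsewhere in `wmt_utils.py`):

```python
# Illustrative subset names only; wmt_utils.py defines the real CWMT_SUBSET_NAMES.
CWMT_SUBSET_NAMES = ["casia2015", "datum2017", "neu2017"]

entries = [
    {
        "name": ss,
        "url": "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/%s.zip" % ss,
        # *_c[hn].txt matches both *_ch.txt and *_cn.txt Chinese-side files.
        "path": ("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
    }
    for ss in CWMT_SUBSET_NAMES
]

print(entries[0]["url"])  # https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/casia2015.zip
```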
 
         name="euelections_dev2019",
         target="de",
         sources={"fr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
     ),
     SubDataset(
         name="newsdev2014",
         target="en",
         sources={"hi"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
     ),
     SubDataset(
         name="newsdev2015",
         target="en",
         sources={"fi"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
     ),
     SubDataset(
         name="newsdiscussdev2015",
         target="en",
         sources={"ro", "tr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newsdev2016",
         target="en",
         sources={"ro", "tr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newsdev2017",
         target="en",
         sources={"lv", "zh"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newsdev2018",
         target="en",
         sources={"et"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newsdev2019",
         target="en",
         sources={"gu", "kk", "lt"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newsdiscussdev2015",
         target="en",
         sources={"fr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newsdiscusstest2015",
         target="en",
         sources={"fr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newssyscomb2009",
         target="en",
         sources={"cs", "de", "es", "fr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
     ),
     SubDataset(
         name="newstest2008",
         target="en",
         sources={"cs", "de", "es", "fr", "hu"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
     ),
     SubDataset(
         name="newstest2009",
         target="en",
         sources={"cs", "de", "es", "fr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
     ),
     SubDataset(
         name="newstest2010",
         target="en",
         sources={"cs", "de", "es", "fr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
     ),
     SubDataset(
         name="newstest2011",
         target="en",
         sources={"cs", "de", "es", "fr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
     ),
     SubDataset(
         name="newstest2012",
         target="en",
         sources={"cs", "de", "es", "fr", "ru"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
     ),
     SubDataset(
         name="newstest2013",
         target="en",
         sources={"cs", "de", "es", "fr", "ru"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
     ),
     SubDataset(
         name="newstest2014",
         target="en",
         sources={"cs", "de", "es", "fr", "hi", "ru"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newstest2015",
         target="en",
         sources={"cs", "de", "fi", "ru"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newsdiscusstest2015",
         target="en",
         sources={"fr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newstest2016",
         target="en",
         sources={"cs", "de", "fi", "ro", "ru", "tr"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newstestB2016",
         target="en",
         sources={"fi"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
     ),
     SubDataset(
         name="newstest2017",
         target="en",
         sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
     ),
     SubDataset(
         name="newstestB2017",
         target="en",
         sources={"fi"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
     ),
     SubDataset(
         name="newstest2018",
         target="en",
         sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
+        url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
         path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
     ),
 ]
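All of the dev/test subsets above now point at the same wmt19 `dev.zip` archive and differ only in the `path` they read inside it, so a single download serves every evaluation year. Which dev sets apply to a given pair follows from `sources` and `target`; a toy filter over entries shaped like the definitions above (illustrative data, not the full list):

```python
# Illustrative entries only, shaped like the SubDataset definitions above.
DEV_SUBSETS = [
    {"name": "newsdev2016", "target": "en", "sources": {"ro", "tr"}},
    {"name": "newstest2016", "target": "en", "sources": {"cs", "de", "fi", "ro", "ru", "tr"}},
]


def dev_subsets_for(src: str, target: str = "en"):
    return [d["name"] for d in DEV_SUBSETS if d["target"] == target and src in d["sources"]]


print(dev_subsets_for("ro"))  # ['newsdev2016', 'newstest2016']
```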
 
         # TODO(PVP): remove when manual dir works
         # +++++++++++++++++++++
         if language_pair[1] in ["cs", "hi", "ru"]:
+            raise NotImplementedError(f"The dataset for {language_pair[1]}-en is currently not fully supported.")
         # +++++++++++++++++++++
 
 
             if dataset.get_manual_dl_files(source):
                 # TODO(PVP): following two lines skip configs that are incomplete for now
                 # +++++++++++++++++++++
+                logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
                 continue
                 # +++++++++++++++++++++
 
 
             ]
             assert all(
                 os.path.exists(path) for path in manual_paths
+            ), f"For {dataset.name}, you must manually download the following file(s) from {dataset.get_url(source)} and place them in {dl_manager.manual_dir}: {', '.join(manual_dl_files)}"
 
             # set manual path for correct subset
             manual_paths_dict[ss_name] = manual_paths
 
                 for ex_dir, rel_path in zip(extract_dirs, rel_paths)
             ]
 
+        def _get_filenames(dataset):
+            rel_paths = dataset.get_path(source)
+            urls = dataset.get_url(source)
+            if len(urls) == 1:
+                urls = urls * len(rel_paths)
+            return [rel_path if rel_path else os.path.basename(url) for url, rel_path in zip(urls, rel_paths)]
+
         for ss_name in split_subsets:
             # TODO(PVP) remove following five lines when manual data works
             # +++++++++++++++++++++
             dataset = DATASET_MAP[ss_name]
             source, _ = self.config.language_pair
             if dataset.get_manual_dl_files(source):
+                logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
                 continue
             # +++++++++++++++++++++
 
             logger.info("Generating examples from: %s", ss_name)
             dataset = DATASET_MAP[ss_name]
             extract_dirs = extraction_map[ss_name]
             files = _get_local_paths(dataset, extract_dirs)
+            filenames = _get_filenames(dataset)
+
+            sub_generator_args = tuple(files)
 
             if ss_name.startswith("czeng"):
                 if ss_name.endswith("16pre"):
                     sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
+                    sub_generator_args += tuple(filenames)
                 elif ss_name.endswith("17"):
                     filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
                     sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
 
                     sub_generator = _parse_frde_bitext
                 else:
                     sub_generator = _parse_parallel_sentences
+                    sub_generator_args += tuple(filenames)
             elif len(files) == 1:
+                fname = filenames[0]
                 # Note: Due to formatting used by `download_manager`, the file
                 # extension may not be at the end of the file path.
                 if ".tsv" in fname:
                     sub_generator = _parse_tsv
+                    sub_generator_args += tuple(filenames)
                 elif (
                     ss_name.startswith("newscommentary_v14")
                     or ss_name.startswith("europarl_v9")
                     or ss_name.startswith("wikititles_v1")
                 ):
                     sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
+                    sub_generator_args += tuple(filenames)
                 elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
                     sub_generator = _parse_tmx
                 elif ss_name.startswith("wikiheadlines"):
 
                 else:
                     raise ValueError("Invalid number of files: %d" % len(files))
 
+            for sub_key, ex in sub_generator(*sub_generator_args):
                 if not all(ex.values()):
                     continue
                 # TODO(adarob): Add subset feature.
                 # ex["subset"] = subset
+                key = f"{ss_name}/{sub_key}"
                 if with_translation is True:
                     ex = {"translation": ex}
                 yield key, ex
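`_get_filenames` recovers the archive-relative names so the parsers can still infer languages when streaming hides the on-disk path. A standalone sketch of that fallback (the `FakeSubDataset` class and example URL are hypothetical stand-ins; the list logic mirrors the helper above):

```python
import os


class FakeSubDataset:
    """Hypothetical stand-in for SubDataset: one archive URL, two entries without rel_paths."""

    def get_path(self, src):
        return ["", ""]  # empty rel_paths -> fall back to the URL basename

    def get_url(self, src):
        return ["https://example.org/commoncrawl.de-en.zip"]


def get_filenames(dataset, source="de"):
    rel_paths = dataset.get_path(source)
    urls = dataset.get_url(source)
    if len(urls) == 1:
        urls = urls * len(rel_paths)  # reuse the single archive URL for every path
    return [rel_path if rel_path else os.path.basename(url) for url, rel_path in zip(urls, rel_paths)]


print(get_filenames(FakeSubDataset()))  # ['commoncrawl.de-en.zip', 'commoncrawl.de-en.zip']
```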
 
 
 
+def _parse_parallel_sentences(f1, f2, filename1, filename2):
     """Returns examples from parallel SGML or text files, which may be gzipped."""
 
+    def _parse_text(path, original_filename):
         """Returns the sentences from a single text file, which may be gzipped."""
+        split_path = original_filename.split(".")
 
         if split_path[-1] == "gz":
             lang = split_path[-2]
+
+            def gen():
+                with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
+                    for line in g:
+                        yield line.decode("utf-8").rstrip()
+
+            return gen(), lang
 
         if split_path[-1] == "txt":
             # CWMT
 
             lang = "zh" if lang in ("ch", "cn") else lang
         else:
             lang = split_path[-1]
 
+        def gen():
+            with open(path, "rb") as f:
+                for line in f:
+                    yield line.decode("utf-8").rstrip()
+
+        return gen(), lang
+
+    def _parse_sgm(path, original_filename):
         """Returns sentences from a single SGML file."""
+        lang = original_filename.split(".")[-2]
         # Note: We can't use the XML parser since some of the files are badly
         # formatted.
         seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
 
+        def gen():
+            with open(path, encoding="utf-8") as f:
+                for line in f:
+                    seg_match = re.match(seg_re, line)
+                    if seg_match:
+                        assert len(seg_match.groups()) == 1
+                        yield seg_match.groups()[0]
+
+        return gen(), lang
+
+    parse_file = _parse_sgm if os.path.basename(f1).endswith(".sgm") else _parse_text
 
     # Some datasets (e.g., CWMT) contain multiple parallel files specified with
     # a wildcard. We sort both sets to align them and parse them one by one.
 
     )
 
     for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
-        l1_sentences, l1 = parse_file(f1_i)
-        l2_sentences, l2 = parse_file(f2_i)
-
-        assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
-            len(l1_sentences),
-            len(l2_sentences),
-            f1_i,
-            f2_i,
-        )
+        l1_sentences, l1 = parse_file(f1_i, filename1)
+        l2_sentences, l2 = parse_file(f2_i, filename2)
 
         for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
-            key = "{}/{}".format(f_id, line_id)
+            key = f"{f_id}/{line_id}"
             yield key, {l1: s1, l2: s2}
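The rewritten `_parse_text` and `_parse_sgm` return lazy generators instead of materialized lists, so large corpora are decoded line by line rather than loaded whole (which is also why the old size-matching assert is gone). A minimal sketch of the gzip branch, using the same `open` plus `GzipFile` pattern as above (the file name in the usage comment is only an example):

```python
import gzip


def stream_gz_lines(path):
    # Decompress on the fly and decode each line lazily, as in _parse_text.
    with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
        for line in g:
            yield line.decode("utf-8").rstrip()


# Usage, assuming a gzipped text file on disk:
# for sentence in stream_gz_lines("giga-fren.release2.fixed.fr.gz"):
#     print(sentence)
```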
 
 
 def _parse_frde_bitext(fr_path, de_path):
-    with open(fr_path, encoding="utf-8") as f:
-        fr_sentences = f.read().split("\n")
-    with open(de_path, encoding="utf-8") as f:
-        de_sentences = f.read().split("\n")
-    assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
-        len(fr_sentences),
-        len(de_sentences),
-        fr_path,
-        de_path,
-    )
-    for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
-        yield line_id, {"fr": s1, "de": s2}
+    with open(fr_path, encoding="utf-8") as fr_f:
+        with open(de_path, encoding="utf-8") as de_f:
+            for line_id, (s1, s2) in enumerate(zip(fr_f, de_f)):
+                yield line_id, {"fr": s1.rstrip(), "de": s2.rstrip()}
 
 
 def _parse_tmx(path):
 
@@ -946,11 +954,11 @@ def _parse_tmx(path):
                 elem.clear()
 
 
-def _parse_tsv(path, language_pair=None):
+def _parse_tsv(path, filename, language_pair=None):
     """Generates examples from TSV file."""
     if language_pair is None:
-        lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
-        assert lang_match is not None, "Invalid TSV filename: %s" % path
+        lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", filename)
+        assert lang_match is not None, "Invalid TSV filename: %s" % filename
         l1, l2 = lang_match.groups()
     else:
         l1, l2 = language_pair
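Because extraction can mangle local paths, `_parse_tsv` now matches the language pair against the original `filename` rather than `path`. A quick check of that regex (the sample file name follows the WMT `*.<l1>-<l2>.tsv` convention):

```python
import re

pattern = re.compile(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv")

match = pattern.match("news-commentary-v14.de-en.tsv")
assert match is not None
print(match.groups())  # ('de', 'en')
```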
 
@@ -997,7 +1005,7 @@ def _parse_czeng(*paths, **kwargs):
                         block_match = re.match(re_block, id_)
                         if block_match and block_match.groups()[0] in bad_blocks:
                             continue
-                    sub_key = "{}/{}".format(filename, line_id)
+                    sub_key = f"{filename}/{line_id}"
                     yield sub_key, {
                         "cs": cs.strip(),
                         "en": en.strip(),