mlqa/dataset_infos.json
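The entries below follow the Hugging Face `datasets` metadata schema: each top-level key is a loadable config (the machine-translated `mlqa-translate-train.*` and `mlqa-translate-test.*` sets, plus the cross-lingual `mlqa.<context-language>.<question-language>` pairs), each carrying the SQuAD-style feature schema, per-split sizes, and sha256 checksums for the source archives. As a minimal usage sketch (assuming the `datasets` Python library is installed; the config names are taken verbatim from the keys below, and any of them can be substituted):

    from datasets import load_dataset

    # Load one cross-lingual config: Arabic contexts paired with German questions.
    ds = load_dataset("mlqa", "mlqa.ar.de")

    # Split names and row counts should match the "splits" entries recorded below.
    print(ds)

    # Rows follow the schema declared under "features":
    # context, question, answers {answer_start, text}, and id.
    row = ds["test"][0]
    print(row["question"])
    print(row["answers"]["text"], row["answers"]["answer_start"])

The "download_checksums" blocks record the byte size and sha256 digest of each source archive, which the library uses to verify downloads.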
{"mlqa-translate-train.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-train.ar", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 101227245, "num_examples": 78058, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 13144332, "num_examples": 9512, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-train.tar.gz": {"num_bytes": 63364123, "checksum": "e510277b0bc3c0639f53839aba8591aebfe803b8b84c5c2ef0513768b86b7cf9"}}, "download_size": 63364123, "dataset_size": 114371577, "size_in_bytes": 177735700}, "mlqa-translate-train.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-train.de", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 77996825, "num_examples": 80069, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 10322113, "num_examples": 9927, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-train.tar.gz": {"num_bytes": 63364123, "checksum": "e510277b0bc3c0639f53839aba8591aebfe803b8b84c5c2ef0513768b86b7cf9"}}, "download_size": 63364123, "dataset_size": 88318938, "size_in_bytes": 151683061}, "mlqa-translate-train.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-train.vi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 97387431, "num_examples": 84816, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 12731112, "num_examples": 10356, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-train.tar.gz": {"num_bytes": 63364123, "checksum": "e510277b0bc3c0639f53839aba8591aebfe803b8b84c5c2ef0513768b86b7cf9"}}, "download_size": 63364123, "dataset_size": 110118543, "size_in_bytes": 173482666}, "mlqa-translate-train.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-train.zh", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 55143547, "num_examples": 76285, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 7418070, "num_examples": 9568, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-train.tar.gz": {"num_bytes": 63364123, "checksum": "e510277b0bc3c0639f53839aba8591aebfe803b8b84c5c2ef0513768b86b7cf9"}}, "download_size": 63364123, "dataset_size": 62561617, "size_in_bytes": 125925740}, "mlqa-translate-train.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-train.es", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 80789653, "num_examples": 81810, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 10718376, "num_examples": 10123, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-train.tar.gz": {"num_bytes": 63364123, "checksum": "e510277b0bc3c0639f53839aba8591aebfe803b8b84c5c2ef0513768b86b7cf9"}}, "download_size": 63364123, "dataset_size": 91508029, "size_in_bytes": 154872152}, "mlqa-translate-train.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-train.hi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 168117671, "num_examples": 82451, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 22422152, "num_examples": 10253, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-train.tar.gz": {"num_bytes": 63364123, "checksum": "e510277b0bc3c0639f53839aba8591aebfe803b8b84c5c2ef0513768b86b7cf9"}}, "download_size": 63364123, "dataset_size": 190539823, "size_in_bytes": 253903946}, "mlqa-translate-test.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-test.ar", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5484467, "num_examples": 5335, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-test.tar.gz": {"num_bytes": 10075488, "checksum": "d6539141c529849d09e9d5ab95127a20e238e6b24081fa3abbc671af340ee4e7"}}, "download_size": 10075488, "dataset_size": 5484467, "size_in_bytes": 15559955}, "mlqa-translate-test.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-test.de", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3884332, "num_examples": 4517, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-test.tar.gz": {"num_bytes": 10075488, "checksum": "d6539141c529849d09e9d5ab95127a20e238e6b24081fa3abbc671af340ee4e7"}}, "download_size": 10075488, "dataset_size": 3884332, "size_in_bytes": 13959820}, "mlqa-translate-test.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-test.vi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5998327, "num_examples": 5495, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-test.tar.gz": {"num_bytes": 10075488, "checksum": "d6539141c529849d09e9d5ab95127a20e238e6b24081fa3abbc671af340ee4e7"}}, "download_size": 10075488, "dataset_size": 5998327, "size_in_bytes": 16073815}, "mlqa-translate-test.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-test.zh", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4831704, "num_examples": 5137, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-test.tar.gz": {"num_bytes": 10075488, "checksum": "d6539141c529849d09e9d5ab95127a20e238e6b24081fa3abbc671af340ee4e7"}}, "download_size": 10075488, "dataset_size": 4831704, "size_in_bytes": 14907192}, "mlqa-translate-test.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-test.es", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3916758, "num_examples": 5253, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-test.tar.gz": {"num_bytes": 10075488, "checksum": "d6539141c529849d09e9d5ab95127a20e238e6b24081fa3abbc671af340ee4e7"}}, "download_size": 10075488, "dataset_size": 3916758, "size_in_bytes": 13992246}, "mlqa-translate-test.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa-translate-test.hi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4608811, "num_examples": 4918, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/mlqa-translate-test.tar.gz": {"num_bytes": 10075488, "checksum": "d6539141c529849d09e9d5ab95127a20e238e6b24081fa3abbc671af340ee4e7"}}, "download_size": 10075488, "dataset_size": 4608811, "size_in_bytes": 14684299}, "mlqa.ar.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.ar.ar", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 8216837, "num_examples": 5335, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 808830, "num_examples": 517, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 9025667, "size_in_bytes": 84744717}, "mlqa.ar.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.ar.de", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2132247, "num_examples": 1649, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 358554, "num_examples": 207, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 2490801, "size_in_bytes": 78209851}, "mlqa.ar.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.ar.vi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3235363, "num_examples": 2047, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 283834, "num_examples": 163, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 3519197, "size_in_bytes": 79238247}, "mlqa.ar.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.ar.zh", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3175660, "num_examples": 1912, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 334016, "num_examples": 188, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 3509676, "size_in_bytes": 79228726}, "mlqa.ar.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.ar.en", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 8074057, "num_examples": 5335, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 794775, "num_examples": 517, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 8868832, "size_in_bytes": 84587882}, "mlqa.ar.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.ar.es", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2981237, "num_examples": 1978, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 223188, "num_examples": 161, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 3204425, "size_in_bytes": 78923475}, "mlqa.ar.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.ar.hi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2993225, "num_examples": 1831, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 276727, "num_examples": 186, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 3269952, "size_in_bytes": 78989002}, "mlqa.de.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.de.ar", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1587005, "num_examples": 1649, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 195822, "num_examples": 207, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1782827, "size_in_bytes": 77501877}, "mlqa.de.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.de.de", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4274496, "num_examples": 4517, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 477366, "num_examples": 512, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 4751862, "size_in_bytes": 80470912}, "mlqa.de.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.de.vi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1654540, "num_examples": 1675, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 211985, "num_examples": 182, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1866525, "size_in_bytes": 77585575}, "mlqa.de.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.de.zh", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1645937, "num_examples": 1621, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 180114, "num_examples": 190, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1826051, "size_in_bytes": 77545101}, "mlqa.de.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.de.en", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4251153, "num_examples": 4517, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 474863, "num_examples": 512, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 4726016, "size_in_bytes": 80445066}, "mlqa.de.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.de.es", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1678176, "num_examples": 1776, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 166193, "num_examples": 196, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1844369, "size_in_bytes": 77563419}, "mlqa.de.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.de.hi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1343983, "num_examples": 1430, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 150679, "num_examples": 163, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1494662, "size_in_bytes": 77213712}, "mlqa.vi.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.vi.ar", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3164094, "num_examples": 2047, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 226724, "num_examples": 163, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 3390818, "size_in_bytes": 79109868}, "mlqa.vi.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.vi.de", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2189315, "num_examples": 1675, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 272794, "num_examples": 182, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 2462109, "size_in_bytes": 78181159}, "mlqa.vi.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.vi.vi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7807045, "num_examples": 5495, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 715291, "num_examples": 511, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 8522336, "size_in_bytes": 84241386}, "mlqa.vi.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.vi.zh", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2947458, "num_examples": 1943, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 265154, "num_examples": 184, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 3212612, "size_in_bytes": 78931662}, "mlqa.vi.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.vi.en", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7727204, "num_examples": 5495, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 707925, "num_examples": 511, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 8435129, "size_in_bytes": 84154179}, "mlqa.vi.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.vi.es", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2822481, "num_examples": 2018, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 279235, "num_examples": 189, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 3101716, "size_in_bytes": 78820766}, "mlqa.vi.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.vi.hi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2738045, "num_examples": 1947, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 251470, "num_examples": 177, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 2989515, "size_in_bytes": 78708565}, "mlqa.zh.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.zh.ar", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1697005, "num_examples": 1912, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 171743, "num_examples": 188, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1868748, "size_in_bytes": 77587798}, "mlqa.zh.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.zh.de", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1356268, "num_examples": 1621, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 170686, "num_examples": 190, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1526954, "size_in_bytes": 77246004}, "mlqa.zh.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.zh.vi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1770535, "num_examples": 1943, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 169651, "num_examples": 184, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1940186, "size_in_bytes": 77659236}, "mlqa.zh.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.zh.zh", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4324740, "num_examples": 5137, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 433960, "num_examples": 504, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 4758700, "size_in_bytes": 80477750}, "mlqa.zh.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.zh.en", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4353361, "num_examples": 5137, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 437016, "num_examples": 504, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 4790377, "size_in_bytes": 80509427}, "mlqa.zh.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.zh.es", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1697983, "num_examples": 1947, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 134693, "num_examples": 161, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1832676, "size_in_bytes": 77551726}, "mlqa.zh.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.zh.hi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1547159, "num_examples": 1767, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 180928, "num_examples": 189, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1728087, "size_in_bytes": 77447137}, "mlqa.en.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.en.ar", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6641971, "num_examples": 5335, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 621075, "num_examples": 517, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 7263046, "size_in_bytes": 82982096}, "mlqa.en.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.en.de", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4966262, "num_examples": 4517, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 584725, "num_examples": 512, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 5550987, "size_in_bytes": 81270037}, "mlqa.en.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.en.vi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6958087, "num_examples": 5495, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 631268, "num_examples": 511, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 7589355, "size_in_bytes": 83308405}, "mlqa.en.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.en.zh", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6441614, "num_examples": 5137, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 598772, "num_examples": 504, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 7040386, "size_in_bytes": 82759436}, "mlqa.en.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.en.en", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 13787522, "num_examples": 11590, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 1307399, "num_examples": 1148, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 15094921, "size_in_bytes": 90813971}, "mlqa.en.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.en.es", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6074990, "num_examples": 5253, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 545657, "num_examples": 500, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 6620647, "size_in_bytes": 82339697}, "mlqa.en.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.en.hi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6293785, "num_examples": 4918, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 614223, "num_examples": 507, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 6908008, "size_in_bytes": 82627058}, "mlqa.es.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.es.ar", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1696778, "num_examples": 1978, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 145105, "num_examples": 161, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1841883, "size_in_bytes": 77560933}, "mlqa.es.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.es.de", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1361983, "num_examples": 1776, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 139968, "num_examples": 196, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1501951, "size_in_bytes": 77221001}, "mlqa.es.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.es.vi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1707141, "num_examples": 2018, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 172801, "num_examples": 189, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1879942, "size_in_bytes": 77598992}, "mlqa.es.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.es.zh", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1635294, "num_examples": 1947, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 122829, "num_examples": 161, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1758123, "size_in_bytes": 77477173}, "mlqa.es.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.es.en", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4249431, "num_examples": 5253, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 408169, "num_examples": 500, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 4657600, "size_in_bytes": 80376650}, "mlqa.es.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.es.es", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4281273, "num_examples": 5253, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 411196, "num_examples": 500, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 4692469, "size_in_bytes": 80411519}, "mlqa.es.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.es.hi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1489611, "num_examples": 1723, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 178003, "num_examples": 187, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 1667614, "size_in_bytes": 77386664}, "mlqa.hi.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.hi.ar", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4374373, "num_examples": 1831, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 402817, "num_examples": 186, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 4777190, "size_in_bytes": 80496240}, "mlqa.hi.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.hi.de", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2961556, "num_examples": 1430, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 294325, "num_examples": 163, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 3255881, "size_in_bytes": 78974931}, "mlqa.hi.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.hi.vi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4664436, "num_examples": 1947, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 411654, "num_examples": 177, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 5076090, "size_in_bytes": 80795140}, "mlqa.hi.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.hi.zh", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4281309, "num_examples": 1767, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 416192, "num_examples": 189, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 4697501, "size_in_bytes": 80416551}, "mlqa.hi.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.hi.en", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 11245629, "num_examples": 4918, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 1076115, "num_examples": 507, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 12321744, "size_in_bytes": 88040794}, "mlqa.hi.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. 
MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.hi.es", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3789337, "num_examples": 1723, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 412469, "num_examples": 187, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 4201806, "size_in_bytes": 79920856}, "mlqa.hi.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\n MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\n German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between \n 4 different languages on average.\n", "citation": "@article{lewis2019mlqa,\n title={MLQA: Evaluating Cross-lingual Extractive Question Answering},\n author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\n journal={arXiv preprint arXiv:1910.07475},\n year={2019}\n}\n", "homepage": "https://github.com/facebookresearch/MLQA", "license": "", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "mlqa", "config_name": "mlqa.hi.hi", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 11606982, "num_examples": 4918, "dataset_name": "mlqa"}, "validation": {"name": "validation", "num_bytes": 1115055, "num_examples": 507, "dataset_name": "mlqa"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "dataset_size": 12722037, "size_in_bytes": 88441087}}