MultiPL-E / dataset_infos.json

Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
License: MIT

Commit e99a7c1 ("Initial commit"), 165 kB
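Since the card lists the Hugging Face Datasets library, a minimal loading sketch may help. It assumes the hub id "nuprl/MultiPL-E" and this revision's config names (e.g. "cpp-keep"); both are assumptions based on this file, not confirmed by it:

    from datasets import load_dataset

    # Hub id "nuprl/MultiPL-E" and config name "cpp-keep" are assumed from this
    # dataset_infos.json; each config exposes a single "test" split.
    problems = load_dataset("nuprl/MultiPL-E", "cpp-keep", split="test")

    print(len(problems))          # 161, per the split metadata below
    print(problems[0]["name"])    # benchmark problem name
    print(problems[0]["prompt"])  # prompt translated from the HumanEval original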
{"cpp-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "cpp-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217792, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/cpp-keep.json": {"num_bytes": 248493, "checksum": "56d81141f7b29c237796e14173b8e2884e97d27a8d57c3644a237c09f59227b4"}}, "download_size": 248493, "post_processing_size": null, "dataset_size": 217792, "size_in_bytes": 466285}, "cpp-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "cpp-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239517, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/cpp-transform.json": {"num_bytes": 270773, "checksum": "cb154fc45bef323590b79bb70c14aba4bad59b6a2180615d8937485d41a93d1e"}}, "download_size": 270773, "post_processing_size": null, "dataset_size": 239517, "size_in_bytes": 510290}, "cpp-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "cpp-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239767, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/cpp-reworded.json": {"num_bytes": 271023, "checksum": "ac639faf8c79348712cb2cd1d95df135a226a49006461245acf810039b9420ce"}}, "download_size": 271023, "post_processing_size": null, "dataset_size": 239767, "size_in_bytes": 510790}, "cpp-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "cpp-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 198566, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/cpp-remove.json": {"num_bytes": 227555, "checksum": "729a5a6e1d68668554f77de56ef17b44eab57beea03f2fb920c075cb4f6a905f"}}, "download_size": 227555, "post_processing_size": null, "dataset_size": 198566, "size_in_bytes": 426121}, "cs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "cs-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259874, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/cs-keep.json": {"num_bytes": 291137, "checksum": "db62ab52665a2742d0bef4de662ca187a703227083881177dad4f2712da5199a"}}, "download_size": 291137, "post_processing_size": null, "dataset_size": 259874, "size_in_bytes": 551011}, "cs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "cs-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283738, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/cs-transform.json": {"num_bytes": 315563, "checksum": "505f4892388ede789dd09a256c3dbc801549c8d1d372fa60b4db339fe09d6319"}}, "download_size": 315563, "post_processing_size": null, "dataset_size": 283738, "size_in_bytes": 599301}, "cs-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "cs-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283673, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/cs-reworded.json": {"num_bytes": 315498, "checksum": "0304b710180681c9a68fe97684a87e71ab35aec9f229fd1d592e0b0ea698d8c2"}}, "download_size": 315498, "post_processing_size": null, "dataset_size": 283673, "size_in_bytes": 599171}, "cs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "cs-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237663, "num_examples": 155, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/cs-remove.json": {"num_bytes": 267251, "checksum": "8e2295c157152f2105d805dc06b26ab91e31000cdc8710f31e693bc65de1b753"}}, "download_size": 267251, "post_processing_size": null, "dataset_size": 237663, "size_in_bytes": 504914}, "d-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "d-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 175592, "num_examples": 156, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/d-keep.json": {"num_bytes": 209568, "checksum": "e34578f5aabf7a3664eee62f77b00cc908c3db8a6a7aeb071965de247f9750e7"}}, "download_size": 209568, "post_processing_size": null, "dataset_size": 175592, "size_in_bytes": 385160}, "d-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "d-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181121, "num_examples": 156, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/d-transform.json": {"num_bytes": 215649, "checksum": "a9d182b3a60e4f951e2235f2a4157b91f518623b6ae21260e1d5d6703cf77a78"}}, "download_size": 215649, "post_processing_size": null, "dataset_size": 181121, "size_in_bytes": 396770}, "d-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "d-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181296, "num_examples": 156, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/d-reworded.json": {"num_bytes": 215824, "checksum": "6a021fd31c45c3f68742f7d60d27082d45d17229daae221d46c70ace9d61bc2b"}}, "download_size": 215824, "post_processing_size": null, "dataset_size": 181296, "size_in_bytes": 397120}, "d-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "d-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 157938, "num_examples": 153, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/d-remove.json": {"num_bytes": 190211, "checksum": "9a36e460e3f0e7fcb92fa6d9f1da5e9d62cf5ee6787af73468bb2a54dada295a"}}, "download_size": 190211, "post_processing_size": null, "dataset_size": 157938, "size_in_bytes": 348149}, "go-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "go-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 241130, "num_examples": 154, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/go-keep.json": {"num_bytes": 280424, "checksum": "6de07406cbf81f3a6d0199ec9fc85eaf78a20d9954f8f3ea22e7d1b2fa9a92b6"}}, "download_size": 280424, "post_processing_size": null, "dataset_size": 241130, "size_in_bytes": 521554}, "go-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "go-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247448, "num_examples": 154, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/go-transform.json": {"num_bytes": 287275, "checksum": "084a15fb951dd89dc33a06cf49acaf2610ee0e2de0c9f8d1325b08a4a88b2ebc"}}, "download_size": 287275, "post_processing_size": null, "dataset_size": 247448, "size_in_bytes": 534723}, "go-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "go-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247354, "num_examples": 154, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/go-reworded.json": {"num_bytes": 287181, "checksum": "b5fee01832bc349cab80f50aa68ec6e8df37cf054457ccfd0333229acae60b08"}}, "download_size": 287181, "post_processing_size": null, "dataset_size": 247354, "size_in_bytes": 534535}, "go-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "go-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 221519, "num_examples": 151, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/go-remove.json": {"num_bytes": 258980, "checksum": "e4bbf884adf71965e8b0978ff20ff779de60f50bd7da8912b620b713de3bc376"}}, "download_size": 258980, "post_processing_size": null, "dataset_size": 221519, "size_in_bytes": 480499}, "java-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "java-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259836, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/java-keep.json": {"num_bytes": 291099, "checksum": "7bf1559d86c8a92fd15b4ed812d885c99c50551f392b2ad816a8e7060527e89c"}}, "download_size": 291099, "post_processing_size": null, "dataset_size": 259836, "size_in_bytes": 550935}, "java-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "java-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 286548, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/java-transform.json": {"num_bytes": 318373, "checksum": "b5da36d56612e80384d9e6a46407241934730d3ba5bca98c5e7ccfb112f9d628"}}, "download_size": 318373, "post_processing_size": null, "dataset_size": 286548, "size_in_bytes": 604921}, "java-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "java-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 288031, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/java-reworded.json": {"num_bytes": 319856, "checksum": "893dabdd6b521f3e05ab84748cd27a1e6debbe9400478c8ca889953940145ca1"}}, "download_size": 319856, "post_processing_size": null, "dataset_size": 288031, "size_in_bytes": 607887}, "java-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "java-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237672, "num_examples": 155, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/java-remove.json": {"num_bytes": 267260, "checksum": "a6c69545169e760eb802d953af94dde684146430b281d43ffa98f72f1416a34d"}}, "download_size": 267260, "post_processing_size": null, "dataset_size": 237672, "size_in_bytes": 504932}, "jl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "jl-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163708, "num_examples": 159, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/jl-keep.json": {"num_bytes": 198696, "checksum": "7fa3f79aa3d56fadae3414684f0f102f87d529099d84a6f5d30a652714419d7b"}}, "download_size": 198696, "post_processing_size": null, "dataset_size": 163708, "size_in_bytes": 362404}, "jl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "jl-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 167969, "num_examples": 159, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/jl-transform.json": {"num_bytes": 203514, "checksum": "255731ab55a8eb128bcf6b3ececbd0dcd5fcb087753b830f148788c53ebfee8e"}}, "download_size": 203514, "post_processing_size": null, "dataset_size": 167969, "size_in_bytes": 371483}, "jl-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "jl-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168251, "num_examples": 159, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/jl-reworded.json": {"num_bytes": 203796, "checksum": "ceef60793f1d2c97d96df7e8ef54695a17a6a1d47a11e4c9c7a202c50300aff3"}}, "download_size": 203796, "post_processing_size": null, "dataset_size": 168251, "size_in_bytes": 372047}, "jl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "jl-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 145913, "num_examples": 156, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/jl-remove.json": {"num_bytes": 179158, "checksum": "221e77ae9a1c3c3ab95d0c5010b119f9fd6f1fea9afaa79e5cf033f9a62e9d11"}}, "download_size": 179158, "post_processing_size": null, "dataset_size": 145913, "size_in_bytes": 325071}, "js-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "js-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177635, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/js-keep.json": {"num_bytes": 211822, "checksum": "02e56da39247f31c4f399a62210fdbe97bb45f6ec239140c3985432b72485bf2"}}, "download_size": 211822, "post_processing_size": null, "dataset_size": 177635, "size_in_bytes": 389457}, "js-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "js-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181987, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/js-transform.json": {"num_bytes": 216729, "checksum": "d90db81d52580d6d21cca9b16662fdac11b4ff5f2b50521652014c3c4d66b9c0"}}, "download_size": 216729, "post_processing_size": null, "dataset_size": 181987, "size_in_bytes": 398716}, "js-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "js-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182171, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/js-reworded.json": {"num_bytes": 216913, "checksum": "ed2aa0a25d0fd9dd963668079e334d88acd8caf0bf020a33964f7cd4700eb670"}}, "download_size": 216913, "post_processing_size": null, "dataset_size": 182171, "size_in_bytes": 399084}, "js-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "js-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158619, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/js-remove.json": {"num_bytes": 191028, "checksum": "8b0d17122dac1a1efef793d71e73473892aba8c8ebf8bf2238e4be8f7cd2685d"}}, "download_size": 191028, "post_processing_size": null, "dataset_size": 158619, "size_in_bytes": 349647}, "lua-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "lua-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 180398, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/lua-keep.json": {"num_bytes": 212511, "checksum": "fb7466e8b89c92fab70dbd7f0074972cf0c6e970f94f7203c4fa01797af59e67"}}, "download_size": 212511, "post_processing_size": null, "dataset_size": 180398, "size_in_bytes": 392909}, "lua-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "lua-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184763, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/lua-transform.json": {"num_bytes": 216595, "checksum": "fba904e9325bb59360bb4e583f796bce78587695db92c6a4b4145a6bbb8778df"}}, "download_size": 216595, "post_processing_size": null, "dataset_size": 184763, "size_in_bytes": 401358}, "lua-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "lua-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184853, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/lua-reworded.json": {"num_bytes": 216685, "checksum": "54b8881bd6d2ba52b1d2e77388f20429edd60e705cf2c8cc87c58db966ceb2ff"}}, "download_size": 216685, "post_processing_size": null, "dataset_size": 184853, "size_in_bytes": 401538}, "lua-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "lua-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 161339, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/lua-remove.json": {"num_bytes": 191690, "checksum": "e12d5519c6f740d9341136043e93f42986a13b7f00a64c393592bca83400f45e"}}, "download_size": 191690, "post_processing_size": null, "dataset_size": 161339, "size_in_bytes": 353029}, "php-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "php-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 219526, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/php-keep.json": {"num_bytes": 256134, "checksum": "6e8bbef0effb50396b752e4e2ee3cd42e9f1edcf253e684dffe0d60efd447af4"}}, "download_size": 256134, "post_processing_size": null, "dataset_size": 219526, "size_in_bytes": 475660}, "php-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "php-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225575, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/php-transform.json": {"num_bytes": 262738, "checksum": "113c46223db9f1235ba2f0a390a0f01a9775400a671537e70755ea471e99088c"}}, "download_size": 262738, "post_processing_size": null, "dataset_size": 225575, "size_in_bytes": 488313}, "php-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "php-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225730, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/php-reworded.json": {"num_bytes": 262893, "checksum": "a27a5093957369e68f16ec973cc3fe16a400a6b1e0efa2469ef607ea5529b176"}}, "download_size": 262893, "post_processing_size": null, "dataset_size": 225730, "size_in_bytes": 488623}, "php-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "php-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200047, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/php-remove.json": {"num_bytes": 234848, "checksum": "3b13b33434a08c9bcff8db2a72e3ec89c85a794b8c1ca576a10614693d3b27b0"}}, "download_size": 234848, "post_processing_size": null, "dataset_size": 200047, "size_in_bytes": 434895}, "pl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "pl-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239874, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/pl-keep.json": {"num_bytes": 279351, "checksum": "116f82cec38a8a9f38bd14bbd9348d18f13879a98c293c7ce9ff38829da8bf3f"}}, "download_size": 279351, "post_processing_size": null, "dataset_size": 239874, "size_in_bytes": 519225}, "pl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "pl-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243611, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/pl-transform.json": {"num_bytes": 283767, "checksum": "552decb4ad799ae7204b0434600d0a7b1b2136dc34dbaa1a3e6ca7acb681173e"}}, "download_size": 283767, "post_processing_size": null, "dataset_size": 243611, "size_in_bytes": 527378}, "pl-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "pl-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243661, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/pl-reworded.json": {"num_bytes": 283817, "checksum": "52010c713c3cb0ee07b691f0c04be20baf35223019bc8dfeb08720b82fd8ce58"}}, "download_size": 283817, "post_processing_size": null, "dataset_size": 243661, "size_in_bytes": 527478}, "pl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "pl-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 220817, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/pl-remove.json": {"num_bytes": 258463, "checksum": "94723d826be5a900f975ffd97039dba9de878945f6d81fa0a59bdebed5c87ef6"}}, "download_size": 258463, "post_processing_size": null, "dataset_size": 220817, "size_in_bytes": 479280}, "py-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "py-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 173537, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/py-keep.json": {"num_bytes": 207009, "checksum": "c583508bfd9ca7f7d8730f7cf618cd5d0fb4d2000f48d39d5311b4eeb06fb6a3"}}, "download_size": 207009, "post_processing_size": null, "dataset_size": 173537, "size_in_bytes": 380546}, "py-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "py-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/py-transform.json": {"num_bytes": 210975, "checksum": "9518a25d142569e8adf490d2cf6ed0df3ed16663991f73900d8477152f9a00c3"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "py-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "py-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/py-reworded.json": {"num_bytes": 210975, "checksum": "56360077d2f35ca58965a85084205b31d4c296563d3fd93f1248bca308535f7f"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "py-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "py-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 155389, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/py-remove.json": {"num_bytes": 187068, "checksum": "491dc22f69bd7e4098c9b927addec8a3f9e7f0a7f93bac655bdc4440c26008a1"}}, "download_size": 187068, "post_processing_size": null, "dataset_size": 155389, "size_in_bytes": 342457}, "r-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "r-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186803, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/r-keep.json": {"num_bytes": 215857, "checksum": "efd573dd3afcf7e6bdbea508dda54067e73777fc0d2e9e6570a52dfda63aa0fa"}}, "download_size": 215857, "post_processing_size": null, "dataset_size": 186803, "size_in_bytes": 402660}, "r-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "r-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191732, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/r-transform.json": {"num_bytes": 220505, "checksum": "5a7b5f28ae59eec006d012623f594c9143fe9854487bd98817ed075d4d2abb97"}}, "download_size": 220505, "post_processing_size": null, "dataset_size": 191732, "size_in_bytes": 412237}, "r-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "r-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191747, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/r-reworded.json": {"num_bytes": 220520, "checksum": "7d4063b824313d807dc8901bf86aab318b6a905549a2229fa9fdf286a526f215"}}, "download_size": 220520, "post_processing_size": null, "dataset_size": 191747, "size_in_bytes": 412267}, "r-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "r-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168422, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/r-remove.json": {"num_bytes": 195771, "checksum": "32085e69d9f3975f38ce336e8e90b34124b19b8d581cdf7d0c5c902c14d6f012"}}, "download_size": 195771, "post_processing_size": null, "dataset_size": 168422, "size_in_bytes": 364193}, "rb-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rb-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181999, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rb-keep.json": {"num_bytes": 216186, "checksum": "d8e86b7408460ff14841666c7514971db6092cdd1b5565d629bf908a71046ba1"}}, "download_size": 216186, "post_processing_size": null, "dataset_size": 181999, "size_in_bytes": 398185}, "rb-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rb-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188317, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rb-transform.json": {"num_bytes": 223059, "checksum": "b53abcc9538e2c743d5bfc0e86f18e0832e6ec0dbd611a98566b05950436d31c"}}, "download_size": 223059, "post_processing_size": null, "dataset_size": 188317, "size_in_bytes": 411376}, "rb-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rb-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188457, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rb-reworded.json": {"num_bytes": 223199, "checksum": "17d1d757c496a5230aacc106a6e61146cb8d8c29f5c9de9c3cd1000e7123b9ad"}}, "download_size": 223199, "post_processing_size": null, "dataset_size": 188457, "size_in_bytes": 411656}, "rb-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rb-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163569, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rb-remove.json": {"num_bytes": 195978, "checksum": "02488606f2897203cf131aeb57eec365b93ecb0e7dd7a73d048890f0fd060e72"}}, "download_size": 195978, "post_processing_size": null, "dataset_size": 163569, "size_in_bytes": 359547}, "rkt-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rkt-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177757, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rkt-keep.json": {"num_bytes": 212266, "checksum": "7086c9ca18882c7f0a18a4b46dfe84c0b5293b69a4c9d8964ad72a797ad72871"}}, "download_size": 212266, "post_processing_size": null, "dataset_size": 177757, "size_in_bytes": 390023}, "rkt-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rkt-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182937, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rkt-transform.json": {"num_bytes": 218001, "checksum": "360afce46e550266f91f096d22e8a5e31e3b7f234c1d465a45c72a82ef2bda17"}}, "download_size": 218001, "post_processing_size": null, "dataset_size": 182937, "size_in_bytes": 400938}, "rkt-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rkt-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182754, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rkt-reworded.json": {"num_bytes": 217818, "checksum": "6d399f13b03d66d107c56736285bddd09c4be707a7bfba5d3865c964ea467d8a"}}, "download_size": 217818, "post_processing_size": null, "dataset_size": 182754, "size_in_bytes": 400572}, "rkt-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rkt-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158729, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rkt-remove.json": {"num_bytes": 191454, "checksum": "4b9e8bd27090d5d21882ac505f579d0825b079af5769c3ca9d8e7585e0e7005a"}}, "download_size": 191454, "post_processing_size": null, "dataset_size": 158729, "size_in_bytes": 350183}, "rs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rs-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177191, "num_examples": 156, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rs-keep.json": {"num_bytes": 206604, "checksum": "d5960e79973aea8bc30d276d5aa8c2750d336b80ff26be4ecc93495a77fd597b"}}, "download_size": 206604, "post_processing_size": null, "dataset_size": 177191, "size_in_bytes": 383795}, "rs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rs-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188587, "num_examples": 156, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rs-transform.json": {"num_bytes": 218555, "checksum": "1cd4f2931c17a8d9ee3aa8e646b818f2f2d5981b252639ff723d34ea5a13f973"}}, "download_size": 218555, "post_processing_size": null, "dataset_size": 188587, "size_in_bytes": 407142}, "rs-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rs-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188841, "num_examples": 156, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rs-reworded.json": {"num_bytes": 218809, "checksum": "78d55aaa02b3faf1b0005b1b3757364274adebd294ee2281653230ebd829b594"}}, "download_size": 218809, "post_processing_size": null, "dataset_size": 188841, "size_in_bytes": 407650}, "rs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "rs-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158191, "num_examples": 153, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/rs-remove.json": {"num_bytes": 185991, "checksum": "064b21353df32e13ad02e7bf68b9a977f78000b632b73828487f5d47a0a9c610"}}, "download_size": 185991, "post_processing_size": null, "dataset_size": 158191, "size_in_bytes": 344182}, "scala-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "scala-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 222118, "num_examples": 160, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/scala-keep.json": {"num_bytes": 253027, "checksum": "eb90cccebedf54864fa5fe487141d5467962aecd05d1eee25403a0369e6ffde6"}}, "download_size": 253027, "post_processing_size": null, "dataset_size": 222118, "size_in_bytes": 475145}, "scala-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "scala-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240540, "num_examples": 160, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/scala-transform.json": {"num_bytes": 272012, "checksum": "48669c1583008ffdd607006c3d4d0df65c0be452b1b7fa5429d15b4739495b34"}}, "download_size": 272012, "post_processing_size": null, "dataset_size": 240540, "size_in_bytes": 512552}, "scala-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "scala-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240466, "num_examples": 160, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/scala-reworded.json": {"num_bytes": 271938, "checksum": "06b28cd512364d4b69a1ff5bfc61b7db620fb21dd73aff0c15db5a547879d38a"}}, "download_size": 271938, "post_processing_size": null, "dataset_size": 240466, "size_in_bytes": 512404}, "scala-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "scala-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200261, "num_examples": 157, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/scala-remove.json": {"num_bytes": 229477, "checksum": "1fc1cc45643a50b0a54e467506582d72c8a7ff1124d07502599f6d16cb51fa93"}}, "download_size": 229477, "post_processing_size": null, "dataset_size": 200261, "size_in_bytes": 429738}, "sh-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "sh-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158460, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/sh-keep.json": {"num_bytes": 193268, "checksum": "4f7240af8ed75b8448061713aa5e92352119b8db4618f0da4378ecd78478d81a"}}, "download_size": 193268, "post_processing_size": null, "dataset_size": 158460, "size_in_bytes": 351728}, "sh-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "sh-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164552, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/sh-transform.json": {"num_bytes": 201631, "checksum": "961c6ce6bf00bb9422c809065fc185da86fb5eadf2d87a40f29f63b855fc032e"}}, "download_size": 201631, "post_processing_size": null, "dataset_size": 164552, "size_in_bytes": 366183}, "sh-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "sh-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164521, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/sh-reworded.json": {"num_bytes": 201600, "checksum": "9f1e19a95aa83cf8ef4a9a23acbd3a1cee176ec13e049f57ade645126ca56ad8"}}, "download_size": 201600, "post_processing_size": null, "dataset_size": 164521, "size_in_bytes": 366121}, "sh-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "sh-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 140720, "num_examples": 155, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/sh-remove.json": {"num_bytes": 173767, "checksum": "0e3e37a23e2a2183ead389b70d46a487a31a96e82de8cc3fb1bf7f43d2ae00d9"}}, "download_size": 173767, "post_processing_size": null, "dataset_size": 140720, "size_in_bytes": 314487}, "swift-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "swift-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 201798, "num_examples": 161, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/swift-keep.json": {"num_bytes": 233903, "checksum": "2f47aae44c26a505bce9a7c456377c015ddb35952017f626cac03c0cd6655642"}}, "download_size": 233903, "post_processing_size": null, "dataset_size": 201798, "size_in_bytes": 435701}, "swift-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "swift-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204760, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/swift-transform.json": {"num_bytes": 236660, "checksum": "c0b76d009ffc75e26040f13c511e78bdfdb4fafe7743fbc2b1315173e638c438"}}, "download_size": 236660, "post_processing_size": null, "dataset_size": 204760, "size_in_bytes": 441420}, "swift-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "swift-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204920, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/swift-reworded.json": {"num_bytes": 236820, "checksum": "193c6907ee55129c7ce823ad9162e9a52f0c0f1657220e6a329718385d31c969"}}, "download_size": 236820, "post_processing_size": null, "dataset_size": 204920, "size_in_bytes": 441740}, "swift-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "swift-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181681, "num_examples": 158, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/swift-remove.json": {"num_bytes": 212047, "checksum": "9c5aadcab3e2bed9592808321c2f5abbf18c257b71b329bc41689c4a54972ead"}}, "download_size": 212047, "post_processing_size": null, "dataset_size": 181681, "size_in_bytes": 393728}, "ts-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "ts-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181763, "num_examples": 159, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/ts-keep.json": {"num_bytes": 215589, "checksum": "bea4e1776118c9bb9f3211deeaa6ce03dde208031b8d90f533f7d5b1d7bb5830"}}, "download_size": 215589, "post_processing_size": null, "dataset_size": 181763, "size_in_bytes": 397352}, "ts-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "ts-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186037, "num_examples": 159, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/ts-transform.json": {"num_bytes": 220423, "checksum": "6081b604f3673a39bd5e8fc68a67977a3855f477cdfc1431a6cf0e2fb0be00bf"}}, "download_size": 220423, "post_processing_size": null, "dataset_size": 186037, "size_in_bytes": 406460}, "ts-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "ts-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186215, "num_examples": 159, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/ts-reworded.json": {"num_bytes": 220601, "checksum": "e64fa52e9e95e4daa62a9e8162b4ba1a6ec3e2881a7968ba4a69eaa3d8ba61e3"}}, "download_size": 220601, "post_processing_size": null, "dataset_size": 186215, "size_in_bytes": 406816}, "ts-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multipl_e", "config_name": "ts-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 162881, "num_examples": 156, "dataset_name": "multipl_e"}}, "download_checksums": {"./data/ts-remove.json": {"num_bytes": 194985, "checksum": "7a98910e983f01a13325280b3d9d383bbd1454eced4b5b08b4f7da9daf781f32"}}, "download_size": 194985, "post_processing_size": null, "dataset_size": 162881, "size_in_bytes": 357866}}
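All of the config entries above share one schema (name, language, prompt, doctests, original, prompt_terminology, tests, stop_tokens) and a single test split; only the config_name, example counts, sizes, and checksums differ. Below is a minimal sketch of loading one of these configs with the Hugging Face datasets library; it assumes the dataset is published under the repo id nuprl/MultiPL-E (this file records only the homepage, not the hub path) and that config names match the keys above, e.g. ts-reworded.

from datasets import load_dataset

# Assumed repo id "nuprl/MultiPL-E"; config name taken from the keys above.
ds = load_dataset("nuprl/MultiPL-E", "ts-reworded", split="test")

print(len(ds))                # 159 examples, per the split metadata above
example = ds[0]
print(example["name"])        # benchmark problem identifier
print(example["prompt"])      # TypeScript prompt handed to the model
print(example["tests"])       # unit tests run against generated code
print(example["stop_tokens"]) # list of stop sequences for generation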