import json
import os
from collections import defaultdict

import requests

import datasets

_CITATION = """\
@article{mbxp_athiwaratkun2022,
  title = {Multi-lingual Evaluation of Code Generation Models},
  author = {Athiwaratkun, Ben and Gouda, Sanjay Krishna and Wang, Zijian and Li, Xiaopeng and Tian, Yuchen and Tan, Ming and Ahmad, Wasi Uddin and Wang, Shiqi and Sun, Qing and Shang, Mingyue and Gonugondla, Sujan Kumar and Ding, Hantian and Kumar, Varun and Fulton, Nathan and Farahani, Arash and Jain, Siddhartha and Giaquinto, Robert and Qian, Haifeng and Ramanathan, Murali Krishna and Nallapati, Ramesh and Ray, Baishakhi and Bhatia, Parminder and Sengupta, Sudipta and Roth, Dan and Xiang, Bing},
  doi = {10.48550/ARXIV.2210.14868},
  url = {https://arxiv.org/abs/2210.14868},
  keywords = {Machine Learning (cs.LG), Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
  publisher = {arXiv},
  year = {2022},
  copyright = {Creative Commons Attribution 4.0 International}
}"""

VERSION = "1.1.0"

_HOMEPAGE = "https://github.com/amazon-science/mbxp-exec-eval"

_LICENSE = "Apache License 2.0"

_DESCRIPTION = """\
A collection of execution-based multi-lingual benchmarks for code generation.
"""

# Default license/citation for every language config; the Python config
# additionally cites the original MathQA paper below.
_LICENSES = defaultdict(lambda: _LICENSE)
_CITATIONS = defaultdict(lambda: _CITATION)
_CITATIONS["python"] = """\
@inproceedings{amini-etal-2019-mathqa,
  title = {MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms},
  author = {Amini, Aida and Gabriel, Saadia and Lin, Shanchuan and Koncel-Kedziorski, Rik and Choi, Yejin and Hajishirzi, Hannaneh},
  booktitle = {Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)},
  month = {jun},
  year = {2019},
  address = {Minneapolis, Minnesota},
  publisher = {Association for Computational Linguistics},
  url = {https://aclanthology.org/N19-1245},
  doi = {10.18653/v1/N19-1245},
  pages = {2357--2367},
}

@article{mbxp_athiwaratkun2022,
  title = {Multi-lingual Evaluation of Code Generation Models},
  author = {Athiwaratkun, Ben and Gouda, Sanjay Krishna and Wang, Zijian and Li, Xiaopeng and Tian, Yuchen and Tan, Ming and Ahmad, Wasi Uddin and Wang, Shiqi and Sun, Qing and Shang, Mingyue and Gonugondla, Sujan Kumar and Ding, Hantian and Kumar, Varun and Fulton, Nathan and Farahani, Arash and Jain, Siddhartha and Giaquinto, Robert and Qian, Haifeng and Ramanathan, Murali Krishna and Nallapati, Ramesh and Ray, Baishakhi and Bhatia, Parminder and Sengupta, Sudipta and Roth, Dan and Xiang, Bing},
  doi = {10.48550/ARXIV.2210.14868},
  url = {https://arxiv.org/abs/2210.14868},
  keywords = {Machine Learning (cs.LG), Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
  publisher = {arXiv},
  year = {2022},
  copyright = {Creative Commons Attribution 4.0 International}
}"""

_GITHUB_ROOT = "https://raw.githubusercontent.com/amazon-science/mbxp-exec-eval/main/data/multilingual_mathqa/"

# Fetch the language -> data-file mapping from the repository at import time.
metadata_response = requests.get(os.path.join(_GITHUB_ROOT, "metadata.json"))
metadata = json.loads(metadata_response.text)


class MathQAXConfig(datasets.BuilderConfig):
    """BuilderConfig for MathQA-X."""

    def __init__(
        self,
        language,
        data_url,
        citation,
        version,
        **kwargs,
    ):
        super(MathQAXConfig, self).__init__(version=datasets.Version(version, ""), **kwargs)
        self.name = language
        self.data_url = data_url
        self.citation = citation


class MathQAX(datasets.GeneratorBasedBuilder):
    """MathQA-X: an execution-based, multi-lingual MathQA benchmark for code generation."""

    # One config per language listed in the repository's metadata.json.
    BUILDER_CONFIGS = [
        MathQAXConfig(
            name=language,
            language=language,
            version=VERSION,
            citation=_CITATIONS[language],
            description=f"MathQA-X benchmark in {language}",
            data_url=os.path.join(_GITHUB_ROOT, language_path),
        )
        for language, language_path in metadata.items()
    ]

    def _info(self):
        self.build_name = self.name
        features = datasets.Features(
            {
                "task_id": datasets.Value("string"),
                "language": datasets.Value("string"),
                "prompt": datasets.Value("string"),
                "description": datasets.Value("string"),
                "test": datasets.Value("string"),
                "entry_point": datasets.Value("string"),
                "canonical_solution": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSES[self.config.name],
            citation=_CITATIONS[self.config.name],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_file = dl_manager.download_and_extract(url_or_urls=self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_file,
                },
            )
        ]

    def _generate_examples(self, filepath):
        """Yields examples from the downloaded JSON Lines file."""
        with open(filepath, encoding="utf-8") as file:
            data = [json.loads(line) for line in file]
        for id_, sample in enumerate(data):
            yield id_, sample
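

# A minimal usage sketch (an assumption, not part of the original script): if
# this file is saved locally as, e.g., "mathqa_x.py", it can be loaded with the
# `datasets` library as a script-based dataset. Recent `datasets` releases may
# additionally require `trust_remote_code=True` for loading scripts.
#
#   from datasets import load_dataset
#   ds = load_dataset("mathqa_x.py", "python", split="test")
#   print(ds[0]["task_id"], ds[0]["entry_point"])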