Datasets:

Languages:
English
ArXiv:
License:
File size: 5,948 Bytes
7db51ab
 
 
 
 
 
 
 
 
e61d63a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7db51ab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e61d63a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7db51ab
 
b56b84d
7db51ab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32e710e
7db51ab
 
 
32e710e
7db51ab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
import json
import os
import requests
import datasets

import os
from collections import defaultdict

_CITATION = """\
@article{mbxp_athiwaratkun2022,
  title = {Multi-lingual Evaluation of Code Generation Models},
  author = {Athiwaratkun, Ben and
   Gouda, Sanjay Krishna and
   Wang, Zijian and
   Li, Xiaopeng and
   Tian, Yuchen and
   Tan, Ming
   and Ahmad, Wasi Uddin and
   Wang, Shiqi and
   Sun, Qing and
   Shang, Mingyue and
   Gonugondla, Sujan Kumar and
   Ding, Hantian and
   Kumar, Varun and
   Fulton, Nathan and
   Farahani, Arash and
   Jain, Siddhartha and
   Giaquinto, Robert and
   Qian, Haifeng and
   Ramanathan, Murali Krishna and
   Nallapati, Ramesh and
   Ray, Baishakhi and
   Bhatia, Parminder and
   Sengupta, Sudipta and
   Roth, Dan and
   Xiang, Bing},
  doi = {10.48550/ARXIV.2210.14868},
  url = {https://arxiv.org/abs/2210.14868},
  keywords = {Machine Learning (cs.LG), Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
  publisher = {arXiv},
  year = {2022},
  copyright = {Creative Commons Attribution 4.0 International}
}"""

VERSION=f"1.1.0"

_HOMEPAGE = "https://github.com/amazon-science/mbxp-exec-eval"

_LICENSE = "Apache License 2.0"

_DESCRIPTION = """\
A collection of execution-based multi-lingual benchmark for code generation.
"""

_LICENSES = defaultdict(lambda: _LICENSE)

_CITATIONS = defaultdict(lambda: _CITATION)
_CITATIONS["python"] = """\
@inproceedings{amini-etal-2019-mathqa,
    title={MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms},
    author={Amini, Aida  and
      Gabriel, Saadia  and
      Lin, Shanchuan  and
      Koncel-Kedziorski, Rik  and
      Choi, Yejin  and
      Hajishirzi, Hannaneh},
    booktitle={Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)},
    month={jun},
    year= {2019},
    address = {Minneapolis, Minnesota},
    publisher = {Association for Computational Linguistics},
    url={https://aclanthology.org/N19-1245}
    doi={10.18653/v1/N19-1245},
    pages={2357--2367},
}
@article{mbxp_athiwaratkun2022,
  title = {Multi-lingual Evaluation of Code Generation Models},
  author = {Athiwaratkun, Ben and
   Gouda, Sanjay Krishna and
   Wang, Zijian and
   Li, Xiaopeng and
   Tian, Yuchen and
   Tan, Ming
   and Ahmad, Wasi Uddin and
   Wang, Shiqi and
   Sun, Qing and
   Shang, Mingyue and
   Gonugondla, Sujan Kumar and
   Ding, Hantian and
   Kumar, Varun and
   Fulton, Nathan and
   Farahani, Arash and
   Jain, Siddhartha and
   Giaquinto, Robert and
   Qian, Haifeng and
   Ramanathan, Murali Krishna and
   Nallapati, Ramesh and
   Ray, Baishakhi and
   Bhatia, Parminder and
   Sengupta, Sudipta and
   Roth, Dan and
   Xiang, Bing},
  doi = {10.48550/ARXIV.2210.14868},
  url = {https://arxiv.org/abs/2210.14868},
  keywords = {Machine Learning (cs.LG), Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
  publisher = {arXiv},
  year = {2022},
  copyright = {Creative Commons Attribution 4.0 International}
}"""

_GITHUB_ROOT = "https://raw.githubusercontent.com/amazon-science/mbxp-exec-eval/main/data/multilingual_mathqa/"

# Fetched at import time so BUILDER_CONFIGS below can enumerate the available
# languages. The URL is built by concatenation (_GITHUB_ROOT ends with "/"):
# os.path.join is not URL-safe and would produce backslashes on Windows.
_metadata_response = requests.get(_GITHUB_ROOT + "metadata.json")
# Fail fast on a bad download (404/500) instead of a confusing JSON parse error.
_metadata_response.raise_for_status()
metadata = _metadata_response.json()

class MathQAXConfig(datasets.BuilderConfig):
    """BuilderConfig for MathQA-X.

    One config per target programming language; the config name is the
    language itself, and ``data_url`` points at that language's data file.
    """

    def __init__(
        self,
        language,
        data_url,
        citation,
        version,
        **kwargs,
    ):
        """Store language-specific settings on top of the base BuilderConfig.

        Args:
            language: programming language of this split (also used as the
                config's name).
            data_url: URL of the per-language data file.
            citation: BibTeX citation string for this language's data.
            version: version string parsed into a ``datasets.Version``.
            **kwargs: forwarded to ``datasets.BuilderConfig``.
        """
        parsed_version = datasets.Version(f"{version}", "")
        super().__init__(version=parsed_version, **kwargs)
        # The config is addressed by its language name.
        self.name = language
        self.data_url = data_url
        self.citation = citation


class MathQAX(datasets.GeneratorBasedBuilder):
    """MathQA-X: An execution-based MathQA-X benchmark for code generation."""

    # One config per language listed in the remote metadata.json.
    # URLs are joined by concatenation (_GITHUB_ROOT ends with "/"):
    # os.path.join is not URL-safe and would emit backslashes on Windows.
    # The f"{language}" wrappers were dropped: language is already a str key.
    BUILDER_CONFIGS = [
        MathQAXConfig(
            name=language,
            language=language,
            version=VERSION,
            citation=_CITATIONS[language],
            description=f"MathQA-X benchmark in {language}",
            data_url=_GITHUB_ROOT + language_path,
        )
        for language, language_path in metadata.items()
    ]

    def _info(self):
        """Return the DatasetInfo (features, license, citation) for this config."""
        self.build_name = self.name  # kept for backward compatibility
        features = datasets.Features(
            {
                "task_id": datasets.Value("string"),
                "language": datasets.Value("string"),
                "prompt": datasets.Value("string"),
                "description": datasets.Value("string"),
                "test": datasets.Value("string"),
                "entry_point": datasets.Value("string"),
                "canonical_solution": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSES[self.config.name],
            citation=_CITATIONS[self.config.name],
        )

    def _split_generators(self, dl_manager):
        """Download this config's data file and expose it as a single TEST split."""
        data_file = dl_manager.download_and_extract(url_or_urls=self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_file,
                },
            )
        ]

    def _generate_examples(self, filepath):
        """Yield (index, sample) pairs, one per JSON line in *filepath*.

        Streams the file line by line instead of materializing every record
        in a list first; the enumerate index doubles as the example key.
        """
        with open(filepath, encoding="utf-8") as file:
            for id_, line in enumerate(file):
                yield id_, json.loads(line)