# coding=utf-8
# Copyright 2022 ExeBench authors
# The code required to produce and load this dataset is licensed under MIT License.
# The code samples included in this dataset keep their own licenses, which can be retrieved via their metadata.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Please note that the dataset release is still work in progress.
"""The ExeBench dataset."""

import json
from pathlib import Path

import datasets

_CITATION = """\
@misc{TODO
}
"""

_DESCRIPTION = """\
An ML-scale dataset of executable C functions
"""  # TODO: expand

_HOMEPAGE = "https://github.com/jordiae/exebench"

_LICENSE = "Multiple: see each function license (fields 'ref' and 'path')"

_URL = ""  # "https://huggingface.co/datasets/jordiae/exebench-test/resolve/main/"

_REMOVED_FEATURES = ["doc", "angha_error", "real_error", "angha_io_error", "real_io_error",
                     "angha_io_pairs_are_trivial", "real_io_pairs_are_trivial"]

_RENAMED_FEATURES = {"angha_deps": "synth_deps", "angha_io_pairs": "synth_io_pairs",
                     "angha_exe_wrapper": "synth_exe_wrapper", "angha_iospec": "synth_iospec"}

_FEATURES = datasets.Features(
    {
        "path": datasets.Value("string"),
        "func_def": datasets.Value("string"),
        "func_head": datasets.Value("string"),
        "fname": datasets.Value("string"),
        "signature": datasets.Sequence(datasets.Value("string")),
        # "doc": datasets.Value("string"),
        # "angha_error": datasets.Value("string"),
        # "real_error": datasets.Value("string"),
        # Flattened from Optional[Dict[str, Optional[FuncAsm]]]
        "asm": datasets.Sequence({'target': datasets.Value("string"), 'code': datasets.Value("string")}),
        "synth_deps": datasets.Value("string"),
        "real_deps": datasets.Value("string"),
        "synth_io_pairs": datasets.Sequence({
            "input": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
            "output": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
            "dummy_funcs": datasets.Value("string"),
            "dummy_funcs_seed": datasets.Value("int64")
        }),
        "real_io_pairs": datasets.Sequence({
            "input": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
            "output": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
            "dummy_funcs": datasets.Value("string"),
            "dummy_funcs_seed": datasets.Value("int64")
        }),
        # "angha_io_error": datasets.Value("string"),
        # "real_io_error": datasets.Value("string"),
        "synth_exe_wrapper": datasets.Value("string"),
        "real_exe_wrapper": datasets.Value("string"),
        # "angha_io_pairs_are_trivial": datasets.Value("bool"),
        # "real_io_pairs_are_trivial": datasets.Value("bool"),
        "ref": datasets.Value("string"),
        "synth_iospec": datasets.Value("string"),  # serialized, TODO: improve
        "real_iospec": datasets.Value("string")
    }
)


class ExeBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for ExeBench."""

    def __init__(self, *args, **kwargs):
        """BuilderConfig for ExeBench.

        Args:
            **kwargs: keyword arguments forwarded to super.
""" super().__init__( *args, **kwargs, ) class ExeBench(datasets.GeneratorBasedBuilder): """Semantic Textual Similarity Ca dataset.""" BUILDER_CONFIGS = [ ExeBenchConfig( name="ExeBench", version=datasets.Version("1.0.1"), description="Executable C dataset" ), ] def _info(self): """Give information and typings for the dataset.""" return datasets.DatasetInfo( # This is the description that will appear on the datasets page. description=_DESCRIPTION, # This defines the different columns of the dataset and their types features=_FEATURES, # If there's a common (input, target) tuple from the features, # specify them here. They'll be used if as_supervised=True in # builder.as_dataset. supervised_keys=None, # Homepage of the dataset for documentation homepage=_HOMEPAGE, # License for the dataset if available license=_LICENSE, # Citation for the dataset citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" urls_to_download = { # "train_not_compilable": f"{_URL}train_not_compilable.tar.gz", #"train_synth_compilable": f"{_URL}train_synth_compilable.tar.gz", # "train_real_compilable": f"{_URL}train_real_compilable.tar.gz", #"train_synth_simple_io": f"{_URL}train_synth_simple_io.tar.gz", # "train_real_simple_io": f"{_URL}train_real_simple_io.tar.gz", #"train_synth_rich_io": f"{_URL}train_synth_rich_io.tar.gz", #"valid_synth": f"{_URL}valid_synth.tar.gz", # "valid_real": f"{_URL}valid_real.tar.gz", "test_synth": f"{_URL}test_synth.tar.gz", "test_real": f"{_URL}test_real.tar.gz", } downloaded_files = dl_manager.download_and_extract(urls_to_download) return [ #datasets.SplitGenerator(name='train_not_compilable', # gen_kwargs={"files": downloaded_files["train_not_compilable"]}), #datasets.SplitGenerator(name='train_synth_compilable', # gen_kwargs={"files": downloaded_files["train_synth_compilable"]}), #datasets.SplitGenerator(name='train_real_compilable', # gen_kwargs={"files": downloaded_files["train_real_compilable"]}), #datasets.SplitGenerator(name='train_synth_simple_io', # gen_kwargs={"files": downloaded_files["train_synth_simple_io"]}), #datasets.SplitGenerator(name='train_real_simple_io', # gen_kwargs={"files": downloaded_files["train_real_simple_io"]}), #datasets.SplitGenerator(name='train_synth_rich_io', # gen_kwargs={"files": downloaded_files["train_synth_rich_io"]}), #datasets.SplitGenerator(name='valid_synth', # gen_kwargs={"files": downloaded_files["valid_synth"]}), #datasets.SplitGenerator(name='valid_real', # gen_kwargs={"files": downloaded_files["valid_real"]}), datasets.SplitGenerator(name='test_synth', gen_kwargs={"files": downloaded_files["test_synth"]}), datasets.SplitGenerator(name='test_real', gen_kwargs={"files": downloaded_files["test_real"]}), ] def _generate_examples(self, files): """Yield examples as (key, example) tuples.""" key = 0 import zstandard as zstd for path in Path(files).rglob('*.jsonl.zst'): with zstd.open(open(path, "rb"), "rt", encoding="utf-8") as f: for row in f: data = json.loads(row) data = data['text'] data = self._fixes(data) for io_pairs_kind in ('synth_io_pairs', 'real_io_pairs'): if data[io_pairs_kind]: new_io_pairs = [] for e in data[io_pairs_kind]: new_e = {} new_e['input'] = [{'var': var, 'value': json.dumps(value)} for (var, value) in e['input'].items()] if e['input'] else [] new_e['output'] = [{'var': var, 'value': json.dumps(value)} for (var, value) in e['output'].items()] if e['output'] else [] new_e['dummy_funcs'] = e['dummy_funcs'] new_e['dummy_funcs_seed'] = e['dummy_funcs_seed'] new_io_pairs.append(new_e) 
                            data[io_pairs_kind] = new_io_pairs
                    data['synth_iospec'] = json.dumps(data['synth_iospec'])
                    data['real_iospec'] = json.dumps(data['real_iospec'])
                    yield key, data
                    key += 1

    def _fixes(self, row):
        """Normalize a raw row so it matches the _FEATURES schema."""
        # Flatten the per-target asm dict into a list of {'target', 'code'} records.
        row['asm'] = [{'target': target, 'code': code['func_asm'] if code else None}
                      for (target, code) in row['asm'].items()]  # TODO: pre_asm etc
        # Drop fields that are not exposed by this loader.
        for removed_key in _REMOVED_FEATURES:
            if removed_key in row:
                del row[removed_key]
        # Rename legacy 'angha_*' fields to their 'synth_*' counterparts.
        for original_key, new_key in _RENAMED_FEATURES.items():
            row[new_key] = row[original_key]
            del row[original_key]
        return row
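

# Usage sketch (an assumption, not part of the original loader): with a
# `datasets` release that still supports script-based loading (recent
# versions may additionally require `trust_remote_code=True`) and with
# `_URL` pointing at the hosted archives, the test splits declared in
# `_split_generators()` can be loaded roughly as shown below. The local
# file name "exebench.py" is hypothetical.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load only the synthetic test split; 'test_real' works the same way.
    ds = load_dataset("exebench.py", split="test_synth")
    example = ds[0]
    print(example["fname"])     # name of the C function
    print(example["func_def"])  # its full C definition
    print(example["asm"])       # per-target assembly, as {'target': [...], 'code': [...]}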