"""The ExeBench dataset."""

import json
from pathlib import Path

import datasets
|
|
_CITATION = """\
@misc{TODO
}
"""

_DESCRIPTION = """\
An ML-scale dataset of executable C functions
"""

_HOMEPAGE = "https://github.com/jordiae/exebench"

_LICENSE = "Multiple: see each function license (fields 'ref' and 'path')"

_URL = ""

_REMOVED_FEATURES = ["doc", "angha_error", "real_error", "angha_io_error", "real_io_error",
                     "angha_io_pairs_are_trivial", "real_io_pairs_are_trivial"]

_RENAMED_FEATURES = {"angha_deps": "synth_deps", "angha_io_pairs": "synth_io_pairs",
                     "angha_exe_wrapper": "synth_exe_wrapper", "angha_iospec": "synth_iospec"}
|
|
_FEATURES = datasets.Features(
    {
        "path": datasets.Value("string"),
        "func_def": datasets.Value("string"),
        "func_head": datasets.Value("string"),
        "fname": datasets.Value("string"),
        "signature": datasets.Sequence(datasets.Value("string")),
        "asm": datasets.Sequence({"target": datasets.Value("string"), "code": datasets.Value("string")}),
        "synth_deps": datasets.Value("string"),
        "real_deps": datasets.Value("string"),
        "synth_io_pairs": datasets.Sequence({
            "input": datasets.Sequence({"var": datasets.Value("string"), "value": datasets.Value("string")}),
            "output": datasets.Sequence({"var": datasets.Value("string"), "value": datasets.Value("string")}),
            "dummy_funcs": datasets.Value("string"),
            "dummy_funcs_seed": datasets.Value("int64"),
        }),
        "real_io_pairs": datasets.Sequence({
            "input": datasets.Sequence({"var": datasets.Value("string"), "value": datasets.Value("string")}),
            "output": datasets.Sequence({"var": datasets.Value("string"), "value": datasets.Value("string")}),
            "dummy_funcs": datasets.Value("string"),
            "dummy_funcs_seed": datasets.Value("int64"),
        }),
        "synth_exe_wrapper": datasets.Value("string"),
        "real_exe_wrapper": datasets.Value("string"),
        "ref": datasets.Value("string"),
        "synth_iospec": datasets.Value("string"),
        "real_iospec": datasets.Value("string"),
    }
)
|
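# NOTE: the "value" entries in `synth_io_pairs`/`real_io_pairs` and the
# `synth_iospec`/`real_iospec` columns are stored as JSON-encoded strings
# (see `_generate_examples`); decode them with `json.loads` after loading.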
|
|
class ExeBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for ExeBench."""

    def __init__(self, *args, **kwargs):
        """BuilderConfig for ExeBench.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(*args, **kwargs)
|
|
class ExeBench(datasets.GeneratorBasedBuilder):
    """ExeBench: an ML-scale dataset of executable C functions."""

    BUILDER_CONFIGS = [
        ExeBenchConfig(
            name="ExeBench",
            version=datasets.Version("1.0.1"),
            description="Executable C dataset",
        ),
    ]

    def _info(self):
        """Return the dataset metadata and feature typings."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "test_synth": f"{_URL}test_synth.tar.gz",
            "test_real": f"{_URL}test_real.tar.gz",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name="test_synth",
                                    gen_kwargs={"files": downloaded_files["test_synth"]}),
            datasets.SplitGenerator(name="test_real",
                                    gen_kwargs={"files": downloaded_files["test_real"]}),
        ]
|
    def _generate_examples(self, files):
        """Yield examples as (key, example) tuples."""
        import zstandard as zstd  # local import: zstandard is only needed when examples are generated

        key = 0
        for path in Path(files).rglob('*.jsonl.zst'):
            with zstd.open(open(path, "rb"), "rt", encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    data = data['text']
                    data = self._fixes(data)
                    # Serialize io-pair values as JSON strings so they fit the flat string schema.
                    for io_pairs_kind in ('synth_io_pairs', 'real_io_pairs'):
                        if data[io_pairs_kind]:
                            new_io_pairs = []
                            for e in data[io_pairs_kind]:
                                new_e = {}
                                new_e['input'] = [{'var': var, 'value': json.dumps(value)} for (var, value) in e['input'].items()] if e['input'] else []
                                new_e['output'] = [{'var': var, 'value': json.dumps(value)} for (var, value) in e['output'].items()] if e['output'] else []
                                new_e['dummy_funcs'] = e['dummy_funcs']
                                new_e['dummy_funcs_seed'] = e['dummy_funcs_seed']
                                new_io_pairs.append(new_e)
                            data[io_pairs_kind] = new_io_pairs
                    data['synth_iospec'] = json.dumps(data['synth_iospec'])
                    data['real_iospec'] = json.dumps(data['real_iospec'])
                    yield key, data
                    key += 1
|
    def _fixes(self, row):
        """Reshape a raw row to match _FEATURES: turn the per-target asm dict into a list of
        {'target', 'code'} entries, drop unused fields, and rename angha_* fields to synth_*."""
        row['asm'] = [{'target': target, 'code': code['func_asm'] if code else None} for (target, code) in
                      row['asm'].items()]
        for removed_key in _REMOVED_FEATURES:
            if removed_key in row:
                del row[removed_key]
        for original_key, new_key in _RENAMED_FEATURES.items():
            row[new_key] = row[original_key]
            del row[original_key]
        return row
|
|
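if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the loader itself. It assumes that
    # `_URL` has been pointed at the real archive location and that the installed
    # `datasets` version still supports loading from a local script (newer releases
    # may require `trust_remote_code=True` or drop script-based loading entirely).
    dataset = datasets.load_dataset(__file__, split="test_real")
    example = dataset[0]
    print(example["fname"])
    print(example["func_head"])
    # The *_iospec columns are JSON-encoded strings (see `_generate_examples`).
    print(json.loads(example["real_iospec"]))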