"""CommitPack"""
import json
import datasets
logger = datasets.logging.get_logger(__name__)

### To create paths ###
def get_paths():
    import json, glob, os

    files = {}
    for lang_dir in os.listdir("./data"):
        print("Processing", lang_dir)
        if not os.path.isdir("data/" + lang_dir):
            print(f"Skipping {lang_dir} as it is not a directory")
            continue
        for file in glob.glob(f"data/{lang_dir}/*.jsonl"):
            files[lang_dir] = files.get(lang_dir, []) + [file]
    with open("paths.json", "w") as f:
        json.dump(files, f)
    return files
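
# Maintainer-side sketch (an assumption about the intended workflow, not part of
# the loader's runtime path): `get_paths` is run from a checkout that contains
# the raw `data/<lang>/*.jsonl` shards and produces the `paths.json` index that
# is fetched via `URL` below, shaped like
#     {"python": ["data/python/data_0000.jsonl", ...], "c": [...], ...}
# For example:
#     files = get_paths()
#     print(len(files), "languages indexed")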
_CITATION = """\
@article{muennighoff2023octopack,
  title={OctoPack: Instruction Tuning Code Large Language Models},
  author={Niklas Muennighoff and Qian Liu and Armel Zebaze and Qinkai Zheng and Binyuan Hui and Terry Yue Zhuo and Swayam Singh and Xiangru Tang and Leandro von Werra and Shayne Longpre},
  journal={arXiv preprint arXiv:2308.07124},
  year={2023}
}
"""
_DESCRIPTION = """\
CommitPack is a 4TB dataset of commits scraped from permissively licensed GitHub repositories.
"""
URL = "https://huggingface.co/datasets/bigcode/commitpack/resolve/main/paths.json"
_LANG = ["json", "xml", "text", "javascript", "objective-c++", "python", "c", "c++", "markdown", "java", "html", "yaml", "go", "csv", "php", "jupyter-notebook", "gettext-catalog", "sql", "unity3d-asset", "typescript", "web-ontology-language", "ruby", "c#", "nix", "shell", "perl", "tex", "css", "restructuredtext", "rust", "groff", "ini", "scala", "coffeescript", "haskell", "swift", "lua", "svg", "gas", "ocaml", "erlang", "makefile", "asciidoc", "emacs-lisp", "scss", "clojure", "org", "common-lisp", "diff", "groovy", "html+erb", "nesc", "dart", "powershell", "f#", "dm", "kotlin", "pascal", "jsx", "viml", "actionscript", "cython", "turtle", "less", "mathematica", "xslt", "scheme", "perl6", "edn", "fortran", "java-server-pages", "standard-ml", "cmake", "json5", "vala", "vue", "freemarker", "graphql", "twig", "tcl", "pod", "dockerfile", "yacc", "postscript", "racket", "eagle", "haxe", "julia", "handlebars", "smarty", "visual-basic", "literate-haskell", "smalltalk", "isabelle", "nimrod", "zig", "m4", "max", "elixir", "mako", "arduino", "jade", "haml", "elm", "purebasic", "coldfusion", "lean", "r", "cuda", "textile", "robotframework", "abap", "rdoc", "llvm", "ada", "batchfile", "qml", "jasmin", "assembly", "g-code", "cucumber", "html+php", "kicad", "api-blueprint", "eiffel", "toml", "modelica", "bitbake", "lex", "stylus", "protocol-buffer", "unknown", "nit", "factor", "xs", "sass", "parrot-internal-representation", "html+django", "mediawiki", "logos", "genshi", "coldfusion-cfc", "xtend", "sqf", "vhdl", "antlr", "systemverilog", "hcl", "asp", "nsis", "inform-7", "slim", "groovy-server-pages", "ceylon", "fish", "processing", "component-pascal", "lasso", "glsl", "saltstack", "xbase", "autohotkey", "liquid", "purescript", "agda", "inno-setup", "oz", "chapel", "arc", "opencl", "graphviz-dot", "pawn", "jsoniq", "bluespec", "smali", "krl", "maple", "unrealscript", "ooc", "pure-data", "xquery", "digital-command-language", "moonscript", "awk", "pike", "livescript", "solidity", "monkey", "jsonld", "zephir", "crystal", "rhtml", "stata", "idris", "raml", "openscad", "red", "c2hs-haskell", "cycript", "applescript", "mupad", "literate-agda", "boo", "sourcepawn", "qmake", "ragel-in-ruby-host", "io", "desktop", "propeller-spin", "thrift", "volt", "xproc", "igor-pro", "lolcode", "html+eex", "logtalk", "mirah", "gnuplot", "literate-coffeescript", "jflex", "emberscript", "cobol", "yang", "rebol", "linker-script", "cartocss", "urweb", "rmarkdown", "darcs-patch", "csound", "squirrel", "apl", "hlsl", "latte", "pony", "ioke", "hy", "uno", "pan", "xojo", "papyrus", "stan", "slash", "supercollider", "vcl", "smt", "glyph", "wisp", "renpy", "clips", "dns-zone", "sas", "rouge", "ec", "dylan", "tcsh", "aspectj", "netlogo", "gap", "fancy", "coq", "click", "capn-proto", "flux", "forth", "ats", "netlinx", "clean", "parrot-assembly", "alloy", "lfe", "gdscript", "augeas", "sparql", "lilypond", "scilab", "autoit", "myghty", "blitzmax", "creole", "harbour", "piglatin", "opa", "sage", "ston", "maxscript", "lsl", "gentoo-ebuild", "nu", "bro", "xc", "j", "metal", "module-management-system", "webidl", "tea", "redcode", "shen", "pov-ray-sdl", "x10", "brainfuck", "ninja", "golo", "webassembly", "self", "labview", "octave", "pogoscript", "d", "http", "ecl", "chuck", "gosu", "parrot", "opal", "objective-j", "kit", "gams", "prolog", "clarion", "mask", "brightscript", "scaml", "matlab", "idl", "ags-script", "lookml", "apacheconf", "oxygene", "txl", "grammatical-framework", "renderscript", "mtml", "unified-parallel-c", "dogescript", 
"gentoo-eclass", "zimpl", "irc-log", "fantom", "numpy", "cirru", "xpages", "nginx", "objdump", "python-traceback", "realbasic", "befunge", "bison", "m", "omgrofl"]
_LANG_TO_URL_ESCAPE = {
    "c#": "c%23",
    "f#": "f%23",
}
_LICENSE = "Apache License 2.0"
_VERSION = datasets.Version("1.0.0", "")

class CommitPack(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            description=f"CommitPack {lang}",
            version=_VERSION,
        )
        for lang in _LANG
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "commit": datasets.Value("string"),
                    "old_file": datasets.Value("string"),
                    "new_file": datasets.Value("string"),
                    "old_contents": datasets.Value("string"),
                    "new_contents": datasets.Value("string"),
                    "subject": datasets.Value("string"),
                    "message": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "repos": datasets.Value("string"),
                    # "returncode": datasets.Value("int64"),
                    # "stderr": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        path_file = dl_manager.download(URL)
        with open(path_file, "r") as f:
            files = json.load(f)[self.config.name]
        # "#" is not URL-safe, so escape it for the c#/f# configs before downloading.
        if self.config.name in _LANG_TO_URL_ESCAPE:
            files = [f.replace("#", "%23") for f in files]
        downloaded_files = dl_manager.download(files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": downloaded_files},
            )
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form."""
        # Use %-style lazy formatting; passing `filepaths` as a bare extra
        # argument would make the logging module raise a formatting error.
        logger.info("Generating examples from %s", filepaths)
        id_ = 0
        for p in filepaths:
            with open(p, "r") as f:
                for row in f:
                    data = json.loads(row)
                    yield id_, {
                        "commit": data["commit"],
                        "old_file": data["old_file"],
                        "new_file": data["new_file"],
                        "old_contents": data["old_contents"],
                        "new_contents": data["new_contents"],
                        "subject": data["subject"],
                        "message": data["message"],
                        "lang": data["lang"],
                        "license": data["license"],
                        "repos": data["repos"],
                    }
                    id_ += 1
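
# Example usage (a sketch; assumes network access and that the installed
# `datasets` version supports dataset loading scripts). Any config name
# from `_LANG` can be substituted for "python":
#     from datasets import load_dataset
#     ds = load_dataset("bigcode/commitpack", "python", split="train", streaming=True)
#     print(next(iter(ds))["subject"])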