# coding=utf-8
# Copyright 2022 CodeQueries Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CodeQueries: The CodeQueries benchmark dataset."""
import json

import datasets

logger = datasets.logging.get_logger(__name__)
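
# A minimal usage sketch (assuming this script is hosted on the Hub at
# "thepurpleowl/codequeries", the repository named in the config URLs below):
#
#   from datasets import load_dataset
#   ds = load_dataset("thepurpleowl/codequeries", "ideal", split="test")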
_CODEQUERIES_CITATION = """\
@article{codequeries2022,
title={Learning to Answer Semantic Queries over Code},
author={A, B, C, D, E, F},
journal={arXiv preprint arXiv:<.>},
year={2022}
}
"""
_IDEAL_DESCRIPTION = """\
CodeQueries Ideal setup.
"""
_PREFIX_DESCRIPTION = """\
CodeQueries Prefix setup."""
_FILE_IDEAL_DESCRIPTION = """\
CodeQueries File level Ideal setup."""
_TWOSTEP_DESCRIPTION = """\
CodeQueries Twostep setup."""
class CodequeriesConfig(datasets.BuilderConfig):
"""BuilderConfig for Codequeries."""
def __init__(self, features, citation, data_url, url, **kwargs):
"""BuilderConfig for Codequeries.
        Args:
            features: `list[string]`, list of the features that will appear in the
                feature dict for this configuration.
            citation: `string`, citation for the dataset.
            data_url: `dict`, mapping from split name to relative data path(s)
                in the repo.
            url: `string`, link to the dataset info page.
            **kwargs: keyword arguments forwarded to super.
        """
# Version history:
# 1.0.0: Initial version.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
self.features = features
self.citation = citation
self.data_url = data_url
        self.url = url


class Codequeries(datasets.GeneratorBasedBuilder):
    """The Codequeries benchmark."""

    BUILDER_CONFIGS = [
CodequeriesConfig(
name="ideal",
description=_IDEAL_DESCRIPTION,
features=["query_name", "code_file_path", "context_blocks",
"answer_spans", "supporting_fact_spans",
"example_type", "single_hop",
"subtokenized_input_sequence", "label_sequence"],
citation=_CODEQUERIES_CITATION,
data_url={
"train": "ideal_train.json",
"dev": "ideal_val.json",
"test": "ideal_test.json"
},
url="https://huggingface.co/datasets/thepurpleowl/codequeries",
),
CodequeriesConfig(
name="prefix",
description=_PREFIX_DESCRIPTION,
features=["query_name", "code_file_path",
"answer_spans", "supporting_fact_spans",
"example_type", "single_hop",
"subtokenized_input_sequence", "label_sequence"],
citation=_CODEQUERIES_CITATION,
data_url={
"test": "prefix_test.json"
},
url="https://huggingface.co/datasets/thepurpleowl/codequeries",
),
CodequeriesConfig(
name="file_ideal",
description=_FILE_IDEAL_DESCRIPTION,
features=["query_name", "code_file_path", "context_blocks",
"answer_spans", "supporting_fact_spans",
"example_type", "single_hop",
"subtokenized_input_sequence", "label_sequence"],
citation=_CODEQUERIES_CITATION,
data_url={
"test": "file_ideal_test.json"
},
url="https://huggingface.co/datasets/thepurpleowl/codequeries",
),
CodequeriesConfig(
name="twostep",
description=_TWOSTEP_DESCRIPTION,
features=["query_name", "code_file_path", "context_block",
"answer_spans", "supporting_fact_spans",
"example_type", "single_hop",
"subtokenized_input_sequence", "label_sequence",
"relevance_label"],
citation=_CODEQUERIES_CITATION,
data_url={
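                # twostep data is sharded into multiple JSON files per split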
"train": ["twostep_relevance/" + "twostep_relevance_train_" + str(i) + ".json" for i in range(0, 10)],
"dev": ["twostep_relevance/" + "twostep_relevance_dev_" + str(i) + ".json" for i in range(0, 2)],
"test": ["twostep_relevance/" + "twostep_relevance_test_" + str(i) + ".json" for i in range(0, 10)]
},
url="https://huggingface.co/datasets/thepurpleowl/codequeries",
),
    ]

    DEFAULT_CONFIG_NAME = "ideal"

    def _info(self):
features = {}
features["query_name"] = datasets.Value("string")
features["code_file_path"] = datasets.Value("string")
if self.config.name != "prefix":
if self.config.name == "twostep":
features["context_block"] = {
"content": datasets.Value("string"),
"metadata": datasets.Value("string"),
"header": datasets.Value("string"),
"index": datasets.Value("int32")
}
else:
features["context_blocks"] = [
{
"content": datasets.Value("string"),
"metadata": datasets.Value("string"),
"header": datasets.Value("string"),
"index": datasets.Value("int32")
}
]
features["answer_spans"] = [
{
'span': datasets.Value("string"),
'start_line': datasets.Value("int32"),
'start_column': datasets.Value("int32"),
'end_line': datasets.Value("int32"),
'end_column': datasets.Value("int32")
}
]
features["supporting_fact_spans"] = [
{
'span': datasets.Value("string"),
'start_line': datasets.Value("int32"),
'start_column': datasets.Value("int32"),
'end_line': datasets.Value("int32"),
'end_column': datasets.Value("int32")
}
]
features["example_type"] = datasets.Value("int8")
features["single_hop"] = datasets.Value("bool")
if self.config.name != "prefix":
features["subtokenized_input_sequence"] = datasets.features.Sequence(datasets.Value("string"))
else:
features["subtokenized_input_sequence"] = datasets.features.Sequence(datasets.Value("int32"))
features["label_sequence"] = datasets.features.Sequence(datasets.Value("int8"))
if self.config.name == "twostep":
features["relevance_label"] = datasets.Value("int8")
return datasets.DatasetInfo(
description=self.config.description,
features=datasets.Features(features),
homepage=self.config.url,
citation=_CODEQUERIES_CITATION,
        )

    def _split_generators(self, dl_manager):
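        # Download the per-split data files declared in the config; the
        # "prefix" and "file_ideal" setups ship only a test split.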
dl_dir = dl_manager.download_and_extract(self.config.data_url)
if self.config.name in ["prefix", "file_ideal"]:
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": dl_dir["test"],
"split": datasets.Split.TEST,
},
),
]
else:
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": dl_dir["train"],
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": dl_dir["dev"],
"split": datasets.Split.VALIDATION,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": dl_dir["test"],
"split": datasets.Split.TEST,
},
),
        ]

    def _generate_examples(self, filepath, split):
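        # Stream examples from newline-delimited JSON; for "twostep",
        # `filepath` is a list of shard files rather than a single file.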
if self.config.name in ["prefix", "file_ideal"]:
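            # these setups only define a test split (see _split_generators)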
assert split == datasets.Split.TEST
logger.info("Generating examples from = %s", filepath)
if self.config.name == "twostep":
key = 0
for fp in filepath:
with open(fp, encoding="utf-8") as f:
for line in f:
row = json.loads(line)
instance_key = str(key) + "_" + row["query_name"] + "_" + row["code_file_path"]
yield instance_key, {
"query_name": row["query_name"],
"code_file_path": row["code_file_path"],
"context_block": row["context_blocks"], # single context block
"answer_spans": row["answer_spans"],
"supporting_fact_spans": row["supporting_fact_spans"],
"example_type": row["example_type"],
"single_hop": row["single_hop"],
"subtokenized_input_sequence": row["subtokenized_input_sequence"],
"label_sequence": row["label_sequence"],
"relevance_label": row["relevance_label"],
}
key += 1
elif self.config.name == "prefix":
with open(filepath, encoding="utf-8") as f:
key = 0
for line in f:
row = json.loads(line)
instance_key = str(key) + "_" + row["query_name"] + "_" + row["code_file_path"]
yield instance_key, {
"query_name": row["query_name"],
"code_file_path": row["code_file_path"],
"answer_spans": row["answer_spans"],
"supporting_fact_spans": row["supporting_fact_spans"],
"example_type": row["example_type"],
"single_hop": row["single_hop"],
"subtokenized_input_sequence": row["subtokenized_input_sequence"],
"label_sequence": row["label_sequence"],
}
key += 1
else:
with open(filepath, encoding="utf-8") as f:
key = 0
for line in f:
row = json.loads(line)
instance_key = str(key) + "_" + row["query_name"] + "_" + row["code_file_path"]
yield instance_key, {
"query_name": row["query_name"],
"code_file_path": row["code_file_path"],
"context_blocks": row["context_blocks"],
"answer_spans": row["answer_spans"],
"supporting_fact_spans": row["supporting_fact_spans"],
"example_type": row["example_type"],
"single_hop": row["single_hop"],
"subtokenized_input_sequence": row["subtokenized_input_sequence"],
"label_sequence": row["label_sequence"],
}
key += 1