"""A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
import json
import os
import datasets
_CITATION = """\
@article{zhongSeq2SQL2017,
  author    = {Victor Zhong and
               Caiming Xiong and
               Richard Socher},
  title     = {Seq2SQL: Generating Structured Queries from Natural Language using
               Reinforcement Learning},
  journal   = {CoRR},
  volume    = {abs/1709.00103},
  year      = {2017}
}
"""
_DESCRIPTION = """\
A large crowd-sourced dataset for developing natural language interfaces for relational databases
"""
_DATA_URL = "https://github.com/salesforce/WikiSQL/raw/master/data.tar.bz2"
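# Aggregation and condition operators, indexed by the integer codes WikiSQL uses in
# its `agg` and `conds` fields (index 0 of _AGG_OPS means "no aggregation").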
_AGG_OPS = ["", "MAX", "MIN", "COUNT", "SUM", "AVG"]
_COND_OPS = ["=", ">", "<", "OP"]


class WikiSQL(datasets.GeneratorBasedBuilder):
    """WikiSQL: A large crowd-sourced dataset for developing natural language interfaces for relational databases"""

    VERSION = datasets.Version("0.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "phase": datasets.Value("int32"),
                    "question": datasets.Value("string"),
                    "table": {
                        "header": datasets.features.Sequence(datasets.Value("string")),
                        "page_title": datasets.Value("string"),
                        "page_id": datasets.Value("string"),
                        "types": datasets.features.Sequence(datasets.Value("string")),
                        "id": datasets.Value("string"),
                        "section_title": datasets.Value("string"),
                        "caption": datasets.Value("string"),
                        "rows": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                        "name": datasets.Value("string"),
                    },
                    "sql": {
                        "human_readable": datasets.Value("string"),
                        "sel": datasets.Value("int32"),
                        "agg": datasets.Value("int32"),
                        "conds": datasets.features.Sequence(
                            {
                                "column_index": datasets.Value("int32"),
                                "operator_index": datasets.Value("int32"),
                                "condition": datasets.Value("string"),
                            }
                        ),
                    },
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage="https://github.com/salesforce/WikiSQL",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_DATA_URL)
        dl_dir = os.path.join(dl_dir, "data")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "main_filepath": os.path.join(dl_dir, "test.jsonl"),
                    "tables_filepath": os.path.join(dl_dir, "test.tables.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "main_filepath": os.path.join(dl_dir, "dev.jsonl"),
                    "tables_filepath": os.path.join(dl_dir, "dev.tables.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "main_filepath": os.path.join(dl_dir, "train.jsonl"),
                    "tables_filepath": os.path.join(dl_dir, "train.tables.jsonl"),
                },
            ),
        ]
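
    # Note on the layout produced by `_split_generators` above: the archive extracts into
    # a `data/` directory where `<split>.jsonl` holds the questions/queries and
    # `<split>.tables.jsonl` holds the tables; the two are joined on `table_id` in
    # `_generate_examples` below.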

    def _convert_to_human_readable(self, sel, agg, columns, conditions):
        """Make SQL query string. Based on https://github.com/salesforce/WikiSQL/blob/c2ed4f9b22db1cc2721805d53e6e76e07e2ccbdc/lib/query.py#L10"""

        rep = f"SELECT {_AGG_OPS[agg]} {columns[sel] if columns is not None else f'col{sel}'} FROM table"

        if conditions:
            rep += " WHERE " + " AND ".join([f"{columns[i]} {_COND_OPS[o]} {v}" for i, o, v in conditions])
        return " ".join(rep.split())

    def _generate_examples(self, main_filepath, tables_filepath):
        """Yields examples."""

        # Build a dictionary mapping table_id -> table so each question can be joined with its table
        with open(tables_filepath, encoding="utf-8") as f:
            tables = [json.loads(line) for line in f]
            id_to_tables = {x["id"]: x for x in tables}

        with open(main_filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                row = json.loads(line)
                row["table"] = id_to_tables[row["table_id"]]
                del row["table_id"]

                # Handle missing data
                row["table"]["page_title"] = row["table"].get("page_title", "")
                row["table"]["section_title"] = row["table"].get("section_title", "")
                row["table"]["caption"] = row["table"].get("caption", "")
                row["table"]["name"] = row["table"].get("name", "")
                row["table"]["page_id"] = str(row["table"].get("page_id", ""))

                # Fix row types
                row["table"]["rows"] = [[str(e) for e in r] for r in row["table"]["rows"]]

                # Get human-readable version
                row["sql"]["human_readable"] = self._convert_to_human_readable(
                    row["sql"]["sel"],
                    row["sql"]["agg"],
                    row["table"]["header"],
                    row["sql"]["conds"],
                )

                # Restructure sql->conds
                # WikiSQL provides each condition as a triple [column_index, operator_index, condition];
                # since the condition value can be a float or a str, we convert it to a dict with a string-typed value
                for i in range(len(row["sql"]["conds"])):
                    row["sql"]["conds"][i] = {
                        "column_index": row["sql"]["conds"][i][0],
                        "operator_index": row["sql"]["conds"][i][1],
                        "condition": str(row["sql"]["conds"][i][2]),
                    }

                yield idx, row
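

# Example usage (a sketch, not part of the original script): assuming this file is
# saved locally as `wikisql.py` and the download URL is reachable, the builder can
# be loaded through the standard `datasets` API:
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/wikisql.py", split="validation")
#   print(ds[0]["question"])
#   print(ds[0]["sql"]["human_readable"])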