Dataset fields

The pre-training corpus pairs 132,645 SQL queries over multiple tables with their tabular answers. Each example contains the following fields:

tables (sequence of strings): the input tables, each serialized as a JSON string
table_names (sequence of strings): the names of the input tables
query (string): the SQL query posed over the input tables
answer (string): the result table, serialized as a JSON string
db_name (string): the name of the database the tables come from
source (string): the flattened model input
target (string): the flattened model output
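
The tables and answer fields hold pandas DataFrames serialized to JSON with orient='split' (separate columns, index, and data keys); the usage snippet below reads them back with pd.read_json. A minimal round-trip sketch with a toy table (illustrative only, not taken from the dataset):

import pandas as pd
from io import StringIO

# Toy table for illustration; real tables come from sample["tables"] / sample["answer"].
df = pd.DataFrame({"game_num": [0], "votes_first_avg": [4.54]})

# Serialize the way the dataset stores tables: 'split' keeps columns,
# index, and data as separate JSON keys.
encoded = df.to_json(orient="split")
# encoded looks like:
# {"columns":["game_num","votes_first_avg"],"index":[0],"data":[[0,4.54]]}

# Decode back into a DataFrame, as the usage snippet below does for each sample.
decoded = pd.read_json(StringIO(encoded), orient="split")
print(decoded)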
["{\"columns\":[\"player_id\",\"year\",\"game_num\",\"game_id\",\"team_id\",\"league_id\",\"gp\",\"s(...TRUNCATED)
[ "all_star", "manager_award_vote" ]
"SELECT T1.game_num, AVG(T2.votes_first) FROM all_star as T1 JOIN manager_award_vote as T2 ON T1.lea(...TRUNCATED)
"{\"columns\":[\"game_num\",\"AVG(T2.votes_first)\"],\"index\":[0],\"data\":[[\"0\",\"4.5427945763\"(...TRUNCATED)
null
null
null
["{\"columns\":[\"player_id\",\"year\",\"game_num\",\"game_id\",\"team_id\",\"league_id\",\"gp\",\"s(...TRUNCATED)
[ "all_star", "player_college" ]
"SELECT MAX(T1.starting_pos), T2.year FROM all_star as T1 JOIN player_college as T2 ON T1.player_id (...TRUNCATED)
{"columns":["MAX(T1.starting_pos)","year"],"index":[0],"data":[["","1927"]]}
null
null
null
["{\"columns\":[\"player_id\",\"award_id\",\"year\",\"league_id\",\"tie\",\"notes\"],\"index\":[0,1,(...TRUNCATED)
[ "manager_award", "team_half" ]
"SELECT league_id, year FROM manager_award WHERE player_id = \"wedgeer01\" AND award_id = \"TSN Mana(...TRUNCATED)
"{\"columns\":[\"league_id\",\"year\"],\"index\":[0,1,2,3],\"data\":[[\"AL\",\"1981\"],[\"AL\",\"201(...TRUNCATED)
null
null
null
["{\"columns\":[\"player_id\",\"year\",\"game_num\",\"game_id\",\"team_id\",\"league_id\",\"gp\",\"s(...TRUNCATED)
[ "all_star", "fielding" ]
"SELECT T1.starting_pos, SUM(T2.a) FROM all_star as T1 JOIN fielding as T2 ON T1.league_id = T2.leag(...TRUNCATED)
{"columns":["starting_pos","SUM(T2.a)"],"index":[0],"data":[["1","11686740233"]]}
null
null
null
["{\"columns\":[\"player_id\",\"year\",\"game_num\",\"game_id\",\"team_id\",\"league_id\",\"gp\",\"s(...TRUNCATED)
[ "all_star", "pitching_postseason" ]
"SELECT MAX(T1.year), T2.g_idp FROM all_star as T1 JOIN pitching_postseason as T2 ON T1.team_id = T2(...TRUNCATED)
{"columns":["MAX(T1.year)","g_idp"],"index":[0],"data":[["2015","0"]]}
null
null
null
["{\"columns\":[\"customer_id\",\"datetime_payment\",\"payment_method_code\",\"amount_payment\"],\"i(...TRUNCATED)
[ "Customer_Payments", "Customers" ]
"SELECT T1.amount_payment, T2.amount_outstanding FROM Customer_Payments as T1 JOIN Customers as T2 O(...TRUNCATED)
"{\"columns\":[\"amount_payment\",\"amount_outstanding\"],\"index\":[0,1,2,3,4,5,6,7,8],\"data\":[[\(...TRUNCATED)
null
null
null
["{\"columns\":[\"Driver_ID\",\"Name\",\"Party\",\"Home_city\",\"Age\"],\"index\":[0,1,2,3,4,5,6,7,8(...TRUNCATED)
[ "driver", "school_bus" ]
"SELECT Driver_ID FROM driver GROUP BY Driver_ID HAVING Driver_ID = 10 UNION SELECT Driver_ID FROM s(...TRUNCATED)
"{\"columns\":[\"Driver_ID\"],\"index\":[0,1,2,3,4],\"data\":[[\"3\"],[\"4\"],[\"7\"],[\"9\"],[\"10\(...TRUNCATED)
null
null
null
["{\"columns\":[\"Branch_ID\",\"Name\",\"Open_year\",\"Address_road\",\"City\",\"membership_amount\"(...TRUNCATED)
[ "branch", "member" ]
"SELECT MIN(Name) FROM branch WHERE Open_year = \"2013\" OR Address_road <> \"Cecilia Avenue\" AND m(...TRUNCATED)
{"columns":["MIN(Name)"],"index":[0],"data":[["Alexandre"]]}
null
null
null
["{\"columns\":[\"player_id\",\"year\",\"game_num\",\"game_id\",\"team_id\",\"league_id\",\"gp\",\"s(...TRUNCATED)
[ "all_star", "manager_half" ]
"SELECT AVG(T1.game_num), T2.year FROM all_star as T1 JOIN manager_half as T2 ON T1.league_id = T2.l(...TRUNCATED)
{"columns":["AVG(T1.game_num)","year"],"index":[0],"data":[["0.1361423985","1981"]]}
null
null
null
["{\"columns\":[\"property_id\",\"property_type_code\",\"date_on_market\",\"date_sold\",\"property_n(...TRUNCATED)
[ "Properties", "Properties" ]
"SELECT MIN(oth_feature_3), AVG(hse_feature_3), date_on_market, AVG(apt_feature_1), MIN(property_add(...TRUNCATED)
"{\"columns\":[\"MIN(oth_feature_3)\",\"AVG(hse_feature_3)\",\"date_on_market\",\"AVG(apt_feature_1)(...TRUNCATED)
null
null
null

Usage

import pandas as pd
from io import StringIO
from datasets import load_dataset

multitabqa_pretraining = load_dataset("vaishali/multitabqa_pretraining")

for sample in multitabqa_pretraining["train"]:
    # SQL query and the names of the tables it runs over
    sql_query = sample["query"]
    input_table_names = sample["table_names"]

    # Input tables and the answer are DataFrames serialized with orient='split';
    # newer pandas versions prefer a file-like object over a raw JSON string.
    input_tables = [pd.read_json(StringIO(table), orient="split") for table in sample["tables"]]
    answer = pd.read_json(StringIO(sample["answer"]), orient="split")

    # Flattened input/output strings for sequence-to-sequence training
    input_to_model = sample["source"]
    target = sample["target"]
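
The flattened source and target strings can be fed straight into a sequence-to-sequence pipeline. Below is a minimal, hypothetical preprocessing sketch assuming the Hugging Face transformers library; facebook/bart-base is used only as a stand-in tokenizer (it is not the checkpoint from the MultiTabQA paper), and rows missing source or target, if any, are filtered out first:

from datasets import load_dataset
from transformers import AutoTokenizer

# Stand-in seq2seq tokenizer; any encoder-decoder checkpoint works the same way.
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")

train = load_dataset("vaishali/multitabqa_pretraining", split="train")

# Defensive filter in case some rows lack the flattened strings.
train = train.filter(lambda ex: ex["source"] is not None and ex["target"] is not None)

def preprocess(batch):
    # Encoder input: the pre-flattened "source" string.
    model_inputs = tokenizer(batch["source"], max_length=1024, truncation=True)
    # Decoder target: the "target" string (the linearized answer).
    labels = tokenizer(text_target=batch["target"], max_length=1024, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

tokenized = train.map(preprocess, batched=True, remove_columns=train.column_names)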

BibTeX entry and citation info

@inproceedings{pal-etal-2023-multitabqa,
    title = "{M}ulti{T}ab{QA}: Generating Tabular Answers for Multi-Table Question Answering",
    author = "Pal, Vaishali  and
      Yates, Andrew  and
      Kanoulas, Evangelos  and
      de Rijke, Maarten",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-long.348",
    doi = "10.18653/v1/2023.acl-long.348",
    pages = "6322--6334",
    abstract = "Recent advances in tabular question answering (QA) with large language models are constrained in their coverage and only answer questions over a single table. However, real-world queries are complex in nature, often over multiple tables in a relational database or web page. Single table questions do not involve common table operations such as set operations, Cartesian products (joins), or nested queries. Furthermore, multi-table operations often result in a tabular output, which necessitates table generation capabilities of tabular QA models. To fill this gap, we propose a new task of answering questions over multiple tables. Our model, MultiTabQA, not only answers questions over multiple tables, but also generalizes to generate tabular answers. To enable effective training, we build a pre-training dataset comprising of 132,645 SQL queries and tabular answers. Further, we evaluate the generated tables by introducing table-specific metrics of varying strictness assessing various levels of granularity of the table structure. MultiTabQA outperforms state-of-the-art single table QA models adapted to a multi-table QA setting by finetuning on three datasets: Spider, Atis and GeoQuery.",
}