Languages: Vietnamese
holylovenia committed
Commit 354643b
Parent: 9834583

Upload multispider.py with huggingface_hub
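
Per the commit message, the file was pushed with the huggingface_hub client. A minimal sketch of such an upload, assuming an access token is already configured; the repo id here is hypothetical and not taken from this commit:

    from huggingface_hub import HfApi

    api = HfApi()  # assumes a token was set up beforehand, e.g. via `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="multispider.py",
        path_in_repo="multispider.py",
        repo_id="SEACrowd/multispider",  # hypothetical repo id, for illustration only
        repo_type="dataset",
        commit_message="Upload multispider.py with huggingface_hub",
    )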

Files changed (1)
  1. multispider.py +180 -0
multispider.py ADDED
@@ -0,0 +1,180 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ MultiSpider is the largest multilingual text-to-SQL dataset, covering \
+ seven languages (English, German, French, Spanish, Japanese, \
+ Chinese, and Vietnamese). Building on MultiSpider, the authors further \
+ identify the lexical and structural challenges of text-to-SQL (caused \
+ by language-specific properties and dialectal expressions) and their \
+ intensity across different languages.
+ """
+ from pathlib import Path
+ from typing import Dict, Iterator, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{Dou2022MultiSpiderTB,
+     title={MultiSpider: Towards Benchmarking Multilingual Text-to-SQL Semantic Parsing},
+     author={Longxu Dou and Yan Gao and Mingyang Pan and Dingzirui Wang and Wanxiang Che and Dechen Zhan and Jian-Guang Lou},
+     booktitle={AAAI Conference on Artificial Intelligence},
+     year={2023},
+     url={https://ojs.aaai.org/index.php/AAAI/article/view/26499/26271}
+ }
+ """
+
+ _DATASETNAME = "multispider"
+
+ _DESCRIPTION = """\
+ MultiSpider is the largest multilingual text-to-SQL dataset, covering \
+ seven languages (English, German, French, Spanish, Japanese, \
+ Chinese, and Vietnamese). Building on MultiSpider, the authors further \
+ identify the lexical and structural challenges of text-to-SQL (caused \
+ by language-specific properties and dialectal expressions) and their \
+ intensity across different languages.
+ """
+
+ _HOMEPAGE = "https://github.com/longxudou/multispider"
+
+ _LANGUAGES = ["vie"]
+
+ _LICENSE = Licenses.CC_BY_4_0.value
+
+ _LOCAL = False
+
+ _URLS = {
+     "train": "https://huggingface.co/datasets/dreamerdeo/multispider/resolve/main/dataset/multispider/with_original_value/train_vi.json?download=true",
+     "dev": "https://huggingface.co/datasets/dreamerdeo/multispider/raw/main/dataset/multispider/with_original_value/dev_vi.json",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class MultispiderDataset(datasets.GeneratorBasedBuilder):
+     """
+     MultiSpider is the largest multilingual text-to-SQL dataset, covering
+     seven languages (English, German, French, Spanish, Japanese,
+     Chinese, and Vietnamese). Building on MultiSpider, the authors further
+     identify the lexical and structural challenges of text-to-SQL (caused
+     by language-specific properties and dialectal expressions) and their
+     intensity across different languages.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+     SEACROWD_SCHEMA_NAME = "t2t"
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "db_id": datasets.Value("string"),
+                     "query": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "query_toks": datasets.Sequence(feature=datasets.Value("string")),
+                     "query_toks_no_value": datasets.Sequence(feature=datasets.Value("string")),
+                     "question_toks": datasets.Sequence(feature=datasets.Value("string")),
+                     "sql": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         data_path_train = Path(dl_manager.download_and_extract(_URLS["train"]))
+         data_path_dev = Path(dl_manager.download_and_extract(_URLS["dev"]))
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_path_train,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_path_dev,
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Iterator[Tuple[int, Dict]]:
+         """Yields examples as (key, example) tuples."""
+
+         df = pd.read_json(filepath)
+
+         for index, row in df.iterrows():
+
+             if self.config.schema == "source":
+                 example = row.to_dict()
+
+             elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+                 example = {
+                     "id": str(index),
+                     "text_1": str(row["question"]),
+                     "text_2": str(row["query"]),
+                     "text_1_name": "question",
+                     "text_2_name": "query",
+                 }
+
+             yield index, example
+
+
+ # This template is based on the following template from the datasets package:
+ # https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
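
For reference, a minimal sketch of how this loader could be exercised once the script is saved locally. The local path is hypothetical, the config names come from BUILDER_CONFIGS above, and trust_remote_code is only required on recent versions of the datasets library:

    import datasets

    # Source schema: raw Spider-style fields (db_id, query, question, token lists, sql).
    source = datasets.load_dataset("multispider.py", name="multispider_source", trust_remote_code=True)

    # SEACrowd text-to-text schema: id / text_1 / text_2 pairs.
    t2t = datasets.load_dataset("multispider.py", name="multispider_seacrowd_t2t", trust_remote_code=True)

    print(source["train"][0]["question"])
    print(t2t["train"][0]["text_2"])  # the SQL query paired with the question

In the seacrowd_t2t config, each record pairs the natural-language question (text_1) with its SQL query (text_2), which is how the text-to-SQL task is mapped onto the generic text-to-text schema.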