Datasets:

Languages:
Vietnamese
ArXiv:
License:
holylovenia committed on
Commit
74d5856
1 Parent(s): 3a938b7

Upload phoatis.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. phoatis.py +239 -0
phoatis.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Dict, List, Tuple
3
+
4
+ import datasets
5
+
6
+ from seacrowd.utils import schemas
7
+ from seacrowd.utils.configs import SEACrowdConfig
8
+ from seacrowd.utils.constants import Tasks, Licenses
9
+
10
_CITATION = """\
@article{dao2021intent,
title={Intent Detection and Slot Filling for Vietnamese},
author={Mai Hoang Dao and Thinh Hung Truong and Dat Quoc Nguyen},
year={2021},
eprint={2104.02021},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

_DATASETNAME = "phoatis"

_DESCRIPTION = """\
This is first public intent detection and slot filling dataset for Vietnamese. The data contains 5871 English utterances from ATIS that are manually translated by professional translators into Vietnamese.
"""

_HOMEPAGE = "https://github.com/VinAIResearch/JointIDSF/"

_LICENSE = Licenses.UNKNOWN.value

# Every split needs three parallel files: the utterances (seq.in), the
# per-token slot tags (seq.out) and the per-utterance intent label (label).
# Building the mapping from one URL template replaces 12 near-identical
# hard-coded literals; the resulting dict is identical.
_BASE_URL = "https://raw.githubusercontent.com/VinAIResearch/JointIDSF/main/PhoATIS/{level}-level/{split}/{fname}"

_URLS = {
    _DATASETNAME: {
        level: {
            f"{level}_{split}": [
                _BASE_URL.format(level=level, split=split, fname=fname)
                for fname in ("seq.in", "seq.out", "label")
            ]
            for split in ("train", "dev", "test")
        }
        for level in ("syllable", "word")
    }
}

_LOCAL = False
_LANGUAGES = ["vie"]

_SUPPORTED_TASKS = [Tasks.INTENT_CLASSIFICATION, Tasks.SLOT_FILLING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
79
+
80
def config_constructor_intent_cls(schema: str, version: str, phoatis_subset: str = "syllable") -> SEACrowdConfig:
    """Build a SEACrowdConfig for the PhoATIS intent-classification task.

    Args:
        schema: Schema identifier embedded in the config name (e.g. "seacrowd_text").
        version: Version string for the config.
        phoatis_subset: Tokenization level, either "syllable" or "word".

    Returns:
        A SEACrowdConfig named "phoatis_intent_cls_<subset>_<schema>".
    """
    # Membership test instead of chained `== ... or == ...`; same AssertionError on bad input.
    assert phoatis_subset in ("syllable", "word")

    return SEACrowdConfig(
        name=f"phoatis_intent_cls_{phoatis_subset.lower()}_{schema}",
        version=version,
        description=f"PhoATIS Intent Classification: {phoatis_subset} {schema} schema",
        schema=schema,
        subset_id=phoatis_subset,
    )
90
+
91
+
92
def config_constructor_slot_filling(schema: str, version: str, phoatis_subset: str = "syllable") -> SEACrowdConfig:
    """Build a SEACrowdConfig for the PhoATIS slot-filling task.

    Args:
        schema: Schema identifier embedded in the config name (e.g. "seacrowd_seq_label").
        version: Version string for the config.
        phoatis_subset: Tokenization level, either "syllable" or "word".

    Returns:
        A SEACrowdConfig named "phoatis_slot_filling_<subset>_<schema>".
    """
    # Membership test instead of chained `== ... or == ...`; same AssertionError on bad input.
    assert phoatis_subset in ("syllable", "word")

    return SEACrowdConfig(
        name=f"phoatis_slot_filling_{phoatis_subset.lower()}_{schema}",
        version=version,
        description=f"PhoATIS Slot Filling: {phoatis_subset} {schema} schema",
        schema=schema,
        subset_id=phoatis_subset,
    )
102
+
103
+
104
class PhoATIS(datasets.GeneratorBasedBuilder):
    """The first public intent detection and slot filling dataset for Vietnamese.

    The data contains 5871 English utterances from ATIS that are manually
    translated by professional translators into Vietnamese, in two
    tokenization variants ("syllable" and "word" level).
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # Subset-specific SEACrowd configs for both tokenization levels.
    BUILDER_CONFIGS = []
    BUILDER_CONFIGS.extend([config_constructor_intent_cls("seacrowd_text", _SEACROWD_VERSION, subset) for subset in ["syllable", "word"]])
    BUILDER_CONFIGS.extend([config_constructor_slot_filling("seacrowd_seq_label", _SEACROWD_VERSION, subset) for subset in ["syllable", "word"]])

    # Default configs (syllable-level variants).
    BUILDER_CONFIGS.extend(
        [
            SEACrowdConfig(
                name="phoatis_source",
                version=SOURCE_VERSION,
                description="PhoATIS source schema (Syllable version)",
                schema="source",
                subset_id="syllable",
            ),
            SEACrowdConfig(
                name="phoatis_intent_cls_seacrowd_text",
                version=SEACROWD_VERSION,
                description="PhoATIS Intent Classification SEACrowd schema (Syllable version)",
                schema="seacrowd_text",
                subset_id="syllable",
            ),
            SEACrowdConfig(
                name="phoatis_slot_filling_seacrowd_seq_label",
                version=SEACROWD_VERSION,
                description="PhoATIS Slot Filling SEACrowd schema (Syllable version)",
                schema="seacrowd_seq_label",
                subset_id="syllable",
            ),
        ]
    )

    DEFAULT_CONFIG_NAME = "phoatis_source"

    def _info(self) -> datasets.DatasetInfo:
        """Build the DatasetInfo whose features depend on the selected schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "intent_label": datasets.Value("string"),
                    "slot_label": datasets.Sequence(datasets.Value("string")),
                }
            )
        elif self.config.schema == "seacrowd_text":
            # NOTE(review): repo-relative hard-coded path — breaks unless the
            # process runs from the repository root; confirm against loader usage.
            # Mode "r" (was "r+"): the file is only read. splitlines() avoids the
            # spurious trailing "" label that split("\n") yields when the file
            # ends with a newline.
            with open("./seacrowd/sea_datasets/phoatis/intent_label.txt", "r", encoding="utf8") as fw:
                intent_label = fw.read().splitlines()
            features = schemas.text_features(intent_label)
        elif self.config.schema == "seacrowd_seq_label":
            with open("./seacrowd/sea_datasets/phoatis/slot_label.txt", "r", encoding="utf8") as fw:
                slot_label = fw.read().splitlines()
            features = schemas.seq_label_features(slot_label)
        else:
            # Fail loudly instead of hitting a NameError on the unbound `features`.
            raise ValueError(f"Unknown schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the three parallel files per split and wire up the splits."""
        # subset_id is the tokenization level ("syllable" or "word"); the
        # original local name `schema` was misleading.
        subset = self.config.subset_id
        urls = _URLS[_DATASETNAME][subset]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir[f"{subset}_train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir[f"{subset}_test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir[f"{subset}_dev"],
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs.

        `filepath` is the 3-element list [seq.in, seq.out, label]:
        utterances, space-separated slot tags, and intent labels, aligned by line.
        """

        def _read_lines(path):
            # Mode "r" (was "r+"): read-only access. splitlines() drops the
            # spurious trailing "" element that split("\n") produces when the
            # file ends with a newline, which previously yielded a bogus empty
            # final example.
            with open(path, "r", encoding="utf8") as fw:
                return fw.read().splitlines()

        data_input = _read_lines(filepath[0])   # utterances
        data_slot = _read_lines(filepath[1])    # per-token slot tags
        data_intent = _read_lines(filepath[2])  # one intent label per utterance

        if self.config.schema == "source":
            for idx, text in enumerate(data_input):
                example = {
                    "id": str(idx),
                    "text": text,
                    "intent_label": data_intent[idx],
                    "slot_label": data_slot[idx].split(),
                }
                yield example["id"], example

        elif self.config.schema == "seacrowd_text":
            for idx, text in enumerate(data_input):
                example = {
                    "id": str(idx),
                    "text": text,
                    "label": data_intent[idx],
                }
                yield example["id"], example

        elif self.config.schema == "seacrowd_seq_label":
            for idx, text in enumerate(data_input):
                example = {
                    "id": str(idx),
                    "tokens": text.split(),
                    "labels": data_slot[idx].split(),
                }
                yield example["id"], example