holylovenia committed on
Commit
3194c44
1 Parent(s): b038e6d

Upload massive.py with huggingface_hub

Files changed (1)
  1. massive.py +580 -0
massive.py ADDED
@@ -0,0 +1,580 @@
import json
from typing import List

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@misc{fitzgerald2022massive,
    title={MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages},
    author={Jack FitzGerald and Christopher Hench and Charith Peris and Scott Mackie and Kay Rottmann and Ana Sanchez and Aaron
    Nash and Liam Urbach and Vishesh Kakarala and Richa Singh and Swetha Ranganath and Laurie Crist and Misha Britan and Wouter
    Leeuwis and Gokhan Tur and Prem Natarajan},
    year={2022},
    eprint={2204.08582},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
@inproceedings{bastianelli-etal-2020-slurp,
    title = "{SLURP}: A Spoken Language Understanding Resource Package",
    author = "Bastianelli, Emanuele and
      Vanzo, Andrea and
      Swietojanski, Pawel and
      Rieser, Verena",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.emnlp-main.588",
    doi = "10.18653/v1/2020.emnlp-main.588",
    pages = "7252--7262",
    abstract = "Spoken Language Understanding infers semantic meaning directly from audio data, and thus promises to
    reduce error propagation and misunderstandings in end-user applications. However, publicly available SLU resources are limited.
    In this paper, we release SLURP, a new SLU package containing the following: (1) A new challenging dataset in English spanning
    18 domains, which is substantially bigger and linguistically more diverse than existing datasets; (2) Competitive baselines
    based on state-of-the-art NLU and ASR systems; (3) A new transparent metric for entity labelling which enables a detailed error
    analysis for identifying potential areas of improvement. SLURP is available at https://github.com/pswietojanski/slurp."
}
"""
_DATASETNAME = "massive"

_DESCRIPTION = """\
MASSIVE dataset—Multilingual Amazon Slu resource package (SLURP) for Slot-filling, Intent classification, and
Virtual assistant Evaluation. MASSIVE contains 1M realistic, parallel, labeled virtual assistant utterances
spanning 18 domains, 60 intents, and 55 slots. MASSIVE was created by tasking professional translators to
localize the English-only SLURP dataset into 50 typologically diverse languages, including 8 languages native
to Southeast Asia and 2 other languages widely spoken in the region.
"""
51
+ _HOMEPAGE = "https://github.com/alexa/massive"
52
+ _LICENSE = Licenses.CC_BY_4_0.value
53
+ _LOCAL = False
54
+ _LANGUAGES = ["ind", "jav", "khm", "zlm", "mya", "tha", "tgl", "vie"]
55
+
56
+ _URLS = {
57
+ _DATASETNAME: "https://amazon-massive-nlu-dataset.s3.amazonaws.com/amazon-massive-dataset-1.1.tar.gz",
58
+ }
59
+ _SUPPORTED_TASKS = [Tasks.INTENT_CLASSIFICATION, Tasks.SLOT_FILLING]
60
+ _SOURCE_VERSION = "1.1.0"
61
+ _SEACROWD_VERSION = "2024.06.20"
62
+
63
+ # ind, jav, khm, zlm, mya, tha, tgl, vie, cmn, tam
64
+ _LANGS = [
65
+ "af-ZA",
66
+ "am-ET",
67
+ "ar-SA",
68
+ "az-AZ",
69
+ "bn-BD",
70
+ "cy-GB",
71
+ "da-DK",
72
+ "de-DE",
73
+ "el-GR",
74
+ "en-US",
75
+ "es-ES",
76
+ "fa-IR",
77
+ "fi-FI",
78
+ "fr-FR",
79
+ "he-IL",
80
+ "hi-IN",
81
+ "hu-HU",
82
+ "hy-AM",
83
+ "id-ID", # ind
84
+ "is-IS",
85
+ "it-IT",
86
+ "ja-JP",
87
+ "jv-ID", # jav
88
+ "ka-GE",
89
+ "km-KH", # khm
90
+ "kn-IN",
91
+ "ko-KR",
92
+ "lv-LV",
93
+ "ml-IN",
94
+ "mn-MN",
95
+ "ms-MY", # zlm
96
+ "my-MM", # mya
97
+ "nb-NO",
98
+ "nl-NL",
99
+ "pl-PL",
100
+ "pt-PT",
101
+ "ro-RO",
102
+ "ru-RU",
103
+ "sl-SL",
104
+ "sq-AL",
105
+ "sv-SE",
106
+ "sw-KE",
107
+ "ta-IN",
108
+ "te-IN",
109
+ "th-TH", # tha
110
+ "tl-PH", # tgl
111
+ "tr-TR",
112
+ "ur-PK",
113
+ "vi-VN", # vie
114
+ "zh-CN", # cmn
115
+ "zh-TW",
116
+ ]
117
+ _SUBSETS = ["id-ID", "jv-ID", "km-KH", "ms-MY", "my-MM", "th-TH", "tl-PH", "vi-VN"]
118
+
119
+ _SCENARIOS = ["calendar", "recommendation", "social", "general", "news", "cooking", "iot", "email", "weather", "alarm", "transport", "lists", "takeaway", "play", "audio", "music", "qa", "datetime"]
120
+
_INTENTS = [
    "audio_volume_other",
    "play_music",
    "iot_hue_lighton",
    "general_greet",
    "calendar_set",
    "audio_volume_down",
    "social_query",
    "audio_volume_mute",
    "iot_wemo_on",
    "iot_hue_lightup",
    "audio_volume_up",
    "iot_coffee",
    "takeaway_query",
    "qa_maths",
    "play_game",
    "cooking_query",
    "iot_hue_lightdim",
    "iot_wemo_off",
    "music_settings",
    "weather_query",
    "news_query",
    "alarm_remove",
    "social_post",
    "recommendation_events",
    "transport_taxi",
    "takeaway_order",
    "music_query",
    "calendar_query",
    "lists_query",
    "qa_currency",
    "recommendation_movies",
    "general_joke",
    "recommendation_locations",
    "email_querycontact",
    "lists_remove",
    "play_audiobook",
    "email_addcontact",
    "lists_createoradd",
    "play_radio",
    "qa_stock",
    "alarm_query",
    "email_sendemail",
    "general_quirky",
    "music_likeness",
    "cooking_recipe",
    "email_query",
    "datetime_query",
    "transport_traffic",
    "play_podcasts",
    "iot_hue_lightchange",
    "calendar_remove",
    "transport_query",
    "transport_ticket",
    "qa_factoid",
    "iot_cleaning",
    "alarm_set",
    "datetime_convert",
    "iot_hue_lightoff",
    "qa_definition",
    "music_dislikeness",
]

_TAGS = [
    "O",
    "B-food_type",
    "B-movie_type",
    "B-person",
    "B-change_amount",
    "I-relation",
    "I-game_name",
    "B-date",
    "B-movie_name",
    "I-person",
    "I-place_name",
    "I-podcast_descriptor",
    "I-audiobook_name",
    "B-email_folder",
    "B-coffee_type",
    "B-app_name",
    "I-time",
    "I-coffee_type",
    "B-transport_agency",
    "B-podcast_descriptor",
    "I-playlist_name",
    "B-media_type",
    "B-song_name",
    "I-music_descriptor",
    "I-song_name",
    "B-event_name",
    "I-timeofday",
    "B-alarm_type",
    "B-cooking_type",
    "I-business_name",
    "I-color_type",
    "B-podcast_name",
    "I-personal_info",
    "B-weather_descriptor",
    "I-list_name",
    "B-transport_descriptor",
    "I-game_type",
    "I-date",
    "B-place_name",
    "B-color_type",
    "B-game_name",
    "I-artist_name",
    "I-drink_type",
    "B-business_name",
    "B-timeofday",
    "B-sport_type",
    "I-player_setting",
    "I-transport_agency",
    "B-game_type",
    "B-player_setting",
    "I-music_album",
    "I-event_name",
    "I-general_frequency",
    "I-podcast_name",
    "I-cooking_type",
    "I-radio_name",
    "I-joke_type",
    "I-meal_type",
    "I-transport_type",
    "B-joke_type",
    "B-time",
    "B-order_type",
    "B-business_type",
    "B-general_frequency",
    "I-food_type",
    "I-time_zone",
    "B-currency_name",
    "B-time_zone",
    "B-ingredient",
    "B-house_place",
    "B-audiobook_name",
    "I-ingredient",
    "I-media_type",
    "I-news_topic",
    "B-music_genre",
    "I-definition_word",
    "B-list_name",
    "B-playlist_name",
    "B-email_address",
    "I-currency_name",
    "I-movie_name",
    "I-device_type",
    "I-weather_descriptor",
    "B-audiobook_author",
    "I-audiobook_author",
    "I-app_name",
    "I-order_type",
    "I-transport_name",
    "B-radio_name",
    "I-business_type",
    "B-definition_word",
    "B-artist_name",
    "I-movie_type",
    "B-transport_name",
    "I-email_folder",
    "B-music_album",
    "I-house_place",
    "I-music_genre",
    "B-drink_type",
    "I-alarm_type",
    "B-music_descriptor",
    "B-news_topic",
    "B-meal_type",
    "I-transport_descriptor",
    "I-email_address",
    "I-change_amount",
    "B-device_type",
    "B-transport_type",
    "B-relation",
    "I-sport_type",
    "B-personal_info",
]


class MASSIVEDataset(datasets.GeneratorBasedBuilder):
    """MASSIVE provides multilingual virtual-assistant utterances for detecting the intent of a text and filling its dialogue slots."""

    BUILDER_CONFIGS = (
        [
            SEACrowdConfig(
                name=f"massive_{subset}_source",
                version=datasets.Version(_SOURCE_VERSION),
                description=f"MASSIVE source schema for {subset}",
                schema="source",
                subset_id="massive_" + subset,
            )
            for subset in _SUBSETS
        ]
        + [
            SEACrowdConfig(
                name=f"massive_{subset}_seacrowd_text",
                version=datasets.Version(_SEACROWD_VERSION),
                description=f"MASSIVE Nusantara intent classification schema for {subset}",
                schema="seacrowd_text",
                subset_id="massive_intent_" + subset,
            )
            for subset in _SUBSETS
        ]
        + [
            SEACrowdConfig(
                name=f"massive_{subset}_seacrowd_seq_label",
                version=datasets.Version(_SEACROWD_VERSION),
                description=f"MASSIVE Nusantara slot filling schema for {subset}",
                schema="seacrowd_seq_label",
                subset_id="massive_slot_filling_" + subset,
            )
            for subset in _SUBSETS
        ]
        + [
            SEACrowdConfig(
                name="massive_source",
                version=datasets.Version(_SOURCE_VERSION),
                description="MASSIVE source schema",
                schema="source",
                subset_id="massive",
            ),
            SEACrowdConfig(
                name="massive_seacrowd_text",
                version=datasets.Version(_SEACROWD_VERSION),
                description="MASSIVE Nusantara intent classification schema",
                schema="seacrowd_text",
                subset_id="massive_intent",
            ),
            SEACrowdConfig(
                name="massive_seacrowd_seq_label",
                version=datasets.Version(_SEACROWD_VERSION),
                description="MASSIVE Nusantara slot filling schema",
                schema="seacrowd_seq_label",
                subset_id="massive_slot_filling",
            ),
        ]
    )

    DEFAULT_CONFIG_NAME = "massive_id-ID_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "locale": datasets.Value("string"),
                    "partition": datasets.Value("string"),
                    "scenario": datasets.features.ClassLabel(names=_SCENARIOS),
                    "intent": datasets.features.ClassLabel(names=_INTENTS),
                    "utt": datasets.Value("string"),
                    "annot_utt": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=_TAGS)),
                    "worker_id": datasets.Value("string"),
                    "slot_method": datasets.Sequence(
                        {
                            "slot": datasets.Value("string"),
                            "method": datasets.Value("string"),
                        }
                    ),
                    "judgments": datasets.Sequence(
                        {
                            "worker_id": datasets.Value("string"),
                            "intent_score": datasets.Value("int8"),  # [0, 1, 2]
                            "slots_score": datasets.Value("int8"),  # [0, 1, 2]
                            "grammar_score": datasets.Value("int8"),  # [0, 1, 2, 3, 4]
                            "spelling_score": datasets.Value("int8"),  # [0, 1, 2]
                            "language_identification": datasets.Value("string"),
                        }
                    ),
                }
            )
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(label_names=_INTENTS)
        elif self.config.schema == "seacrowd_seq_label":
            features = schemas.seq_label_features(label_names=_TAGS)
        else:
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        archive = dl_manager.download(_URLS[_DATASETNAME])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive),
                    "split": "train",
                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive),
                    "split": "dev",
                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive),
                    "split": "test",
                    "lang": self.config.name,
                },
            ),
        ]

    def _get_bio_format(self, text):
        """Convert an annotated utterance into parallel lists of tokens and BIO tags. Modified from https://huggingface.co/datasets/qanastek/MASSIVE/blob/main/MASSIVE.py"""
        tags, tokens = [], []

        bio_mode = False
        cpt_bio = 0
        current_tag = None

        split_iter = iter(text.split(" "))

        for s in split_iter:
            if s.startswith("["):
                # An opening bracket starts a slot annotation: remember the slot name and skip the ":" separator.
                current_tag = s.strip("[")
                bio_mode = True
                cpt_bio += 1
                next(split_iter)
                continue

            elif s.endswith("]"):
                # A closing bracket ends the slot: tag the final slot word and reset the state.
                bio_mode = False
                if cpt_bio == 1:
                    prefix = "B-"
                else:
                    prefix = "I-"
                token = prefix + current_tag
                word = s.strip("]")
                current_tag = None
                cpt_bio = 0

            else:
                if bio_mode:
                    if cpt_bio == 1:
                        prefix = "B-"
                    else:
                        prefix = "I-"
                    token = prefix + current_tag
                    word = s
                    cpt_bio += 1
                else:
                    token = "O"
                    word = s

            tags.append(token)
            tokens.append(word)

        return tokens, tags

    def _generate_examples(self, files: list, split: str, lang: str):
        _id = 0

        lang = lang.replace("massive_", "").replace("source", "").replace("seacrowd_text", "").replace("seacrowd_seq_label", "")

        if not lang:
            lang = _LANGS.copy()
        else:
            lang = [lang[:-1]]
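
        # How the config name maps to the locale filter (derived from the replacements above), e.g.:
        #   "massive_id-ID_source"        -> ["id-ID"]
        #   "massive_jv-ID_seacrowd_text" -> ["jv-ID"]
        #   "massive_source"              -> every locale in _LANGS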

        # logger.info("Generating examples from = %s", ", ".join(lang))

        for path, f in files:
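            # The MASSIVE 1.1 archive is assumed to lay files out as "1.1/data/<locale>.jsonl"
            # (e.g. "1.1/data/id-ID.jsonl"), which is what the path parsing below relies on.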
            curr_lang = path.split(f"{_SOURCE_VERSION[:-2]}/data/")[-1].split(".jsonl")[0]

            if not lang:
                break
            elif curr_lang in lang:
                lang.remove(curr_lang)
            else:
                continue

            # Read the file
            lines = f.read().decode(encoding="utf-8").split("\n")
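
            # Each JSONL record is expected to provide at least the fields read below, e.g. (illustrative values):
            #   {"id": "0", "locale": "id-ID", "partition": "train", "scenario": "alarm", "intent": "alarm_set",
            #    "utt": "...", "annot_utt": "...", "worker_id": "1", "slot_method": [...], "judgments": [...]}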

            for line in lines:
                # Skip empty lines (e.g. a trailing newline at the end of the file) before parsing.
                if not line.strip():
                    continue

                data = json.loads(line)

                if data["partition"] != split:
                    continue

                # Slot method
                if "slot_method" in data:
                    slot_method = [
                        {
                            "slot": s["slot"],
                            "method": s["method"],
                        }
                        for s in data["slot_method"]
                    ]
                else:
                    slot_method = []

                # Judgments
                if "judgments" in data:
                    judgments = [
                        {
                            "worker_id": j["worker_id"],
                            "intent_score": j["intent_score"],
                            "slots_score": j["slots_score"],
                            "grammar_score": j["grammar_score"],
                            "spelling_score": j["spelling_score"],
                            "language_identification": j["language_identification"] if "language_identification" in j else "target",
                        }
                        for j in data["judgments"]
                    ]
                else:
                    judgments = []

                if self.config.schema == "source":
                    tokens, tags = self._get_bio_format(data["annot_utt"])

                    yield _id, {
                        "id": str(_id) + "_" + data["id"],
                        "locale": data["locale"],
                        "partition": data["partition"],
                        "scenario": data["scenario"],
                        "intent": data["intent"],
                        "utt": data["utt"],
                        "annot_utt": data["annot_utt"],
                        "tokens": tokens,
                        "ner_tags": tags,
                        "worker_id": data["worker_id"],
                        "slot_method": slot_method,
                        "judgments": judgments,
                    }

                elif self.config.schema == "seacrowd_seq_label":
                    tokens, tags = self._get_bio_format(data["annot_utt"])

                    yield _id, {
                        "id": str(_id) + "_" + data["id"],
                        "tokens": tokens,
                        "labels": tags,
                    }

                elif self.config.schema == "seacrowd_text":
                    yield _id, {
                        "id": str(_id) + "_" + data["id"],
                        "text": data["utt"],
                        "label": data["intent"],
                    }

                else:
                    raise ValueError(f"Invalid config: {self.config.name}")

                _id += 1
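
# A minimal usage sketch (assuming this script is saved locally as "massive.py" and the `seacrowd`
# package is installed; depending on your `datasets` version you may also need trust_remote_code=True):
#
#   import datasets
#   dset = datasets.load_dataset("massive.py", name="massive_id-ID_seacrowd_text")
#   print(dset["train"][0])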