Datasets:
Charith Peris
committed on
Commit
•
af2c01f
1
Parent(s):
de2344b
Updated massive.py to fix preview
Browse files- massive.py +77 -57
massive.py
CHANGED
@@ -468,6 +468,8 @@ _INTENTS = ['datetime_query', 'iot_hue_lightchange', 'transport_ticket', 'takeaw
|
|
468 |
'play_game', 'alarm_remove', 'lists_remove', 'transport_taxi', 'recommendation_movies',
|
469 |
'iot_coffee', 'music_query', 'play_podcasts', 'lists_query']
|
470 |
|
|
|
|
|
471 |
class MASSIVE(datasets.GeneratorBasedBuilder):
|
472 |
"""MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
|
473 |
|
@@ -479,7 +481,13 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
|
|
479 |
) for name in _LANGUAGES
|
480 |
]
|
481 |
|
482 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
483 |
|
484 |
def _info(self):
|
485 |
return datasets.DatasetInfo(
|
@@ -516,13 +524,13 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
|
|
516 |
|
517 |
def _split_generators(self, dl_manager):
|
518 |
|
519 |
-
|
520 |
|
521 |
return [
|
522 |
datasets.SplitGenerator(
|
523 |
name=datasets.Split.TRAIN,
|
524 |
gen_kwargs={
|
525 |
-
"
|
526 |
"split": "train",
|
527 |
"lang": self.config.name,
|
528 |
}
|
@@ -530,7 +538,7 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
|
|
530 |
datasets.SplitGenerator(
|
531 |
name=datasets.Split.VALIDATION,
|
532 |
gen_kwargs={
|
533 |
-
"
|
534 |
"split": "dev",
|
535 |
"lang": self.config.name,
|
536 |
}
|
@@ -538,70 +546,82 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
|
|
538 |
datasets.SplitGenerator(
|
539 |
name=datasets.Split.TEST,
|
540 |
gen_kwargs={
|
541 |
-
"
|
542 |
"split": "test",
|
543 |
"lang": self.config.name,
|
544 |
}
|
545 |
),
|
546 |
]
|
547 |
|
548 |
-
def _generate_examples(self,
|
549 |
-
|
550 |
-
filepath = filepath + "/1.0/data/" + lang + ".jsonl"
|
551 |
|
552 |
-
|
553 |
|
554 |
-
|
555 |
-
|
556 |
-
|
557 |
-
|
558 |
|
559 |
-
|
560 |
|
561 |
-
for
|
562 |
|
563 |
-
|
564 |
|
565 |
-
if
|
|
|
|
|
|
|
|
|
566 |
continue
|
567 |
|
568 |
-
#
|
569 |
-
|
570 |
-
|
571 |
-
|
572 |
-
|
573 |
-
|
574 |
-
|
575 |
-
]
|
576 |
-
|
577 |
-
|
578 |
-
|
579 |
-
|
580 |
-
|
581 |
-
|
582 |
-
|
583 |
-
|
584 |
-
|
585 |
-
|
586 |
-
|
587 |
-
|
588 |
-
|
589 |
-
|
590 |
-
|
591 |
-
|
592 |
-
|
593 |
-
|
594 |
-
|
595 |
-
|
596 |
-
|
597 |
-
|
598 |
-
|
599 |
-
|
600 |
-
|
601 |
-
|
602 |
-
|
603 |
-
|
604 |
-
|
605 |
-
|
606 |
-
|
607 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
468 |
'play_game', 'alarm_remove', 'lists_remove', 'transport_taxi', 'recommendation_movies',
|
469 |
'iot_coffee', 'music_query', 'play_podcasts', 'lists_query']
|
470 |
|
471 |
+
# Sentinel config name selecting every language in the corpus at once.
_ALL = "all"
|
472 |
+
|
473 |
class MASSIVE(datasets.GeneratorBasedBuilder):
|
474 |
"""MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
|
475 |
|
|
|
481 |
) for name in _LANGUAGES
|
482 |
]
|
483 |
|
484 |
+
# Extra builder config covering the entire corpus (all 51 languages);
# made the default so a bare load_dataset("massive") gets everything.
# Fix: the description was an f-string with no placeholders (ruff F541) —
# dropped the `f` prefix; the string bytes are unchanged.
BUILDER_CONFIGS.append(datasets.BuilderConfig(
    name=_ALL,
    version=datasets.Version("1.0.0"),
    description="The MASSIVE corpora for entire corpus",
))

DEFAULT_CONFIG_NAME = _ALL
|
491 |
|
492 |
def _info(self):
|
493 |
return datasets.DatasetInfo(
|
|
|
524 |
|
525 |
def _split_generators(self, dl_manager):
    """Download the corpus archive once and declare the three splits.

    Each split re-iterates the same archive lazily via ``iter_archive``;
    ``_generate_examples`` filters records by partition and language.
    """
    archive = dl_manager.download(_URL)

    # (datasets split constant, partition tag used inside the jsonl records)
    split_partitions = [
        (datasets.Split.TRAIN, "train"),
        (datasets.Split.VALIDATION, "dev"),
        (datasets.Split.TEST, "test"),
    ]

    return [
        datasets.SplitGenerator(
            name=split_name,
            gen_kwargs={
                "files": dl_manager.iter_archive(archive),
                "split": partition,
                "lang": self.config.name,
            },
        )
        for split_name, partition in split_partitions
    ]
|
555 |
|
556 |
+
def _generate_examples(self, files, split, lang):
    """Yield ``(key, example)`` pairs for one split of the MASSIVE corpus.

    Args:
        files: iterable of ``(path, file_object)`` pairs, as produced by
            ``dl_manager.iter_archive`` over the downloaded archive.
        split: partition to keep — ``"train"``, ``"dev"`` or ``"test"``.
        lang: a single locale name, or ``"all"`` for every language.
    """
    key_ = 0

    # NOTE(review): "all" expands via _LANGUAGE_PAIRS while the builder
    # configs are generated from _LANGUAGES — confirm both module
    # constants exist and list the same locale names.
    if lang == "all":
        lang = _LANGUAGE_PAIRS.copy()
    else:
        lang = [lang]

    logger.info("⏳ Generating examples from = %s", ", ".join(lang))

    for path, f in files:
        # Archive members are named ".../1.0/data/<locale>.jsonl".
        l = path.split("1.0/data/")[-1].split(".jsonl")[0]

        if not lang:
            # Every requested language has been consumed; stop scanning.
            break
        elif l in lang:
            # Consume this language so the loop can terminate early.
            lang.remove(l)
        else:
            continue

        # Read the file
        lines = f.read().decode(encoding="utf-8").split("\n")

        for line in lines:
            # Fix: a newline-terminated file yields a trailing empty
            # string from split("\n"), and json.loads("") raises
            # JSONDecodeError — skip blank lines.
            if not line.strip():
                continue

            data = json.loads(line)

            # Keep only records belonging to the requested partition.
            if data["partition"] != split:
                continue

            # Slot method (absent in some records — default to empty).
            if "slot_method" in data:
                slot_method = [
                    {
                        "slot": s["slot"],
                        "method": s["method"],
                    } for s in data["slot_method"]
                ]
            else:
                slot_method = []

            # Judgments (absent in some records — default to empty).
            if "judgments" in data:
                judgments = [
                    {
                        "worker_id": j["worker_id"],
                        "intent_score": j["intent_score"],
                        "slots_score": j["slots_score"],
                        "grammar_score": j["grammar_score"],
                        "spelling_score": j["spelling_score"],
                        # older records lack this key; "target" is the default
                        "language_identification": j["language_identification"] if "language_identification" in j else "target",
                    } for j in data["judgments"]
                ]
            else:
                judgments = []

            yield key_, {
                "id": data["id"],
                "locale": data["locale"],
                "partition": data["partition"],
                "scenario": data["scenario"],
                "intent": data["intent"],
                "utt": data["utt"],
                "annot_utt": data["annot_utt"],
                "worker_id": data["worker_id"],
                "slot_method": slot_method,
                "judgments": judgments,
            }

            key_ += 1
|