Charith Peris committed on
Commit
af2c01f
1 Parent(s): de2344b

Updated massive.py to fix preview

Browse files
Files changed (1) hide show
  1. massive.py +77 -57
massive.py CHANGED
@@ -468,6 +468,8 @@ _INTENTS = ['datetime_query', 'iot_hue_lightchange', 'transport_ticket', 'takeaw
468
  'play_game', 'alarm_remove', 'lists_remove', 'transport_taxi', 'recommendation_movies',
469
  'iot_coffee', 'music_query', 'play_podcasts', 'lists_query']
470
 
 
 
471
  class MASSIVE(datasets.GeneratorBasedBuilder):
472
  """MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
473
 
@@ -479,7 +481,13 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
479
  ) for name in _LANGUAGES
480
  ]
481
 
482
- DEFAULT_CONFIG_NAME = "en-US"
 
 
 
 
 
 
483
 
484
  def _info(self):
485
  return datasets.DatasetInfo(
@@ -516,13 +524,13 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
516
 
517
  def _split_generators(self, dl_manager):
518
 
519
- path = dl_manager.download_and_extract(_URL)
520
 
521
  return [
522
  datasets.SplitGenerator(
523
  name=datasets.Split.TRAIN,
524
  gen_kwargs={
525
- "filepath": path,
526
  "split": "train",
527
  "lang": self.config.name,
528
  }
@@ -530,7 +538,7 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
530
  datasets.SplitGenerator(
531
  name=datasets.Split.VALIDATION,
532
  gen_kwargs={
533
- "filepath": path,
534
  "split": "dev",
535
  "lang": self.config.name,
536
  }
@@ -538,70 +546,82 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
538
  datasets.SplitGenerator(
539
  name=datasets.Split.TEST,
540
  gen_kwargs={
541
- "filepath": path,
542
  "split": "test",
543
  "lang": self.config.name,
544
  }
545
  ),
546
  ]
547
 
548
- def _generate_examples(self, filepath, split, lang):
549
-
550
- filepath = filepath + "/1.0/data/" + lang + ".jsonl"
551
 
552
- logger.info("⏳ Generating examples from = %s", filepath)
553
 
554
- # Read the file
555
- f = open(filepath,"r")
556
- lines = f.read().split("\n")
557
- f.close()
558
 
559
- key_ = 0
560
 
561
- for line in lines:
562
 
563
- data = json.loads(line)
564
 
565
- if data["partition"] != split:
 
 
 
 
566
  continue
567
 
568
- # Slot method
569
- if "slot_method" in data:
570
- slot_method = [
571
- {
572
- "slot": s["slot"],
573
- "method": s["method"],
574
- } for s in data["slot_method"]
575
- ]
576
- else:
577
- slot_method = []
578
-
579
- # Judgments
580
- if "judgments" in data:
581
- judgments = [
582
- {
583
- "worker_id": j["worker_id"],
584
- "intent_score": j["intent_score"],
585
- "slots_score": j["slots_score"],
586
- "grammar_score": j["grammar_score"],
587
- "spelling_score": j["spelling_score"],
588
- "language_identification": j["language_identification"],
589
- } for j in data["judgments"]
590
- ]
591
- else:
592
- judgments = []
593
-
594
- yield key_, {
595
- "id": data["id"],
596
- "locale": data["locale"],
597
- "partition": data["partition"],
598
- "scenario": data["scenario"],
599
- "intent": data["intent"],
600
- "utt": data["utt"],
601
- "annot_utt": data["annot_utt"],
602
- "worker_id": data["worker_id"],
603
- "slot_method": slot_method,
604
- "judgments": judgments,
605
- }
606
-
607
- key_ += 1
 
 
 
 
 
 
 
 
 
 
 
468
  'play_game', 'alarm_remove', 'lists_remove', 'transport_taxi', 'recommendation_movies',
469
  'iot_coffee', 'music_query', 'play_podcasts', 'lists_query']
470
 
471
+ _ALL = "all"
472
+
473
  class MASSIVE(datasets.GeneratorBasedBuilder):
474
  """MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
475
 
 
481
  ) for name in _LANGUAGES
482
  ]
483
 
484
+ BUILDER_CONFIGS.append(datasets.BuilderConfig(
485
+ name = _ALL,
486
+ version = datasets.Version("1.0.0"),
487
+ description = f"The MASSIVE corpora for entire corpus",
488
+ ))
489
+
490
+ DEFAULT_CONFIG_NAME = _ALL
491
 
492
  def _info(self):
493
  return datasets.DatasetInfo(
 
524
 
525
  def _split_generators(self, dl_manager):
526
 
527
+ archive = dl_manager.download(_URL)
528
 
529
  return [
530
  datasets.SplitGenerator(
531
  name=datasets.Split.TRAIN,
532
  gen_kwargs={
533
+ "files": dl_manager.iter_archive(archive),
534
  "split": "train",
535
  "lang": self.config.name,
536
  }
 
538
  datasets.SplitGenerator(
539
  name=datasets.Split.VALIDATION,
540
  gen_kwargs={
541
+ "files": dl_manager.iter_archive(archive),
542
  "split": "dev",
543
  "lang": self.config.name,
544
  }
 
546
  datasets.SplitGenerator(
547
  name=datasets.Split.TEST,
548
  gen_kwargs={
549
+ "files": dl_manager.iter_archive(archive),
550
  "split": "test",
551
  "lang": self.config.name,
552
  }
553
  ),
554
  ]
555
 
556
+ def _generate_examples(self, files, split, lang):
 
 
557
 
558
+ key_ = 0
559
 
560
+ if lang == "all":
561
+ lang = _LANGUAGE_PAIRS.copy()
562
+ else:
563
+ lang = [lang]
564
 
565
+ logger.info("⏳ Generating examples from = %s", ", ".join(lang))
566
 
567
+ for path, f in files:
568
 
569
+ l = path.split("1.0/data/")[-1].split(".jsonl")[0]
570
 
571
+ if not lang:
572
+ break
573
+ elif l in lang:
574
+ lang.remove(l)
575
+ else:
576
  continue
577
 
578
+ # Read the file
579
+ lines = f.read().decode(encoding="utf-8").split("\n")
580
+
581
+ for line in lines:
582
+
583
+ data = json.loads(line)
584
+
585
+ if data["partition"] != split:
586
+ continue
587
+
588
+ # Slot method
589
+ if "slot_method" in data:
590
+ slot_method = [
591
+ {
592
+ "slot": s["slot"],
593
+ "method": s["method"],
594
+ } for s in data["slot_method"]
595
+ ]
596
+ else:
597
+ slot_method = []
598
+
599
+ # Judgments
600
+ if "judgments" in data:
601
+ judgments = [
602
+ {
603
+ "worker_id": j["worker_id"],
604
+ "intent_score": j["intent_score"],
605
+ "slots_score": j["slots_score"],
606
+ "grammar_score": j["grammar_score"],
607
+ "spelling_score": j["spelling_score"],
608
+ "language_identification": j["language_identification"] if "language_identification" in j else "target",
609
+ } for j in data["judgments"]
610
+ ]
611
+ else:
612
+ judgments = []
613
+
614
+ yield key_, {
615
+ "id": data["id"],
616
+ "locale": data["locale"],
617
+ "partition": data["partition"],
618
+ "scenario": data["scenario"],
619
+ "intent": data["intent"],
620
+ "utt": data["utt"],
621
+ "annot_utt": data["annot_utt"],
622
+ "worker_id": data["worker_id"],
623
+ "slot_method": slot_method,
624
+ "judgments": judgments,
625
+ }
626
+
627
+ key_ += 1