Datasets: Massive v1.1 loader (#4)

Commits:
- Updated to support MASSIVE v1.1 (afae233f9bfa81b875488373ea7b4f5ddfd78a15)
- Updated README (36c5138326d041a6015b7a748fa4703e1de95bdd)

Files changed:
- README.md (+3, -1)
- massive.py (+51, -18)
- test_massive.py (+11, -0)
README.md
CHANGED
@@ -11,6 +11,7 @@ multilinguality:
 - ar-SA
 - az-AZ
 - bn-BD
+- ca-ES
 - cy-GB
 - da-DK
 - de-DE
@@ -190,13 +191,14 @@ The dataset can be used to train a model for `natural-language-understanding` (NLU)
 
 ### Languages
 
-The corpora consists of parallel sentences from
+The MASSIVE 1.1 corpora consist of parallel sentences from 52 languages:
 
 - `Afrikaans - South Africa (af-ZA)`
 - `Amharic - Ethiopia (am-ET)`
 - `Arabic - Saudi Arabia (ar-SA)`
 - `Azeri - Azerbaijan (az-AZ)`
 - `Bengali - Bangladesh (bn-BD)`
+- `Catalan - Spain (ca-ES)`
 - `Chinese - China (zh-CN)`
 - `Chinese - Taiwan (zh-TW)`
 - `Danish - Denmark (da-DK)`
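With this change every locale, including the new Catalan one, is exposed as its own config. A minimal usage sketch with the standard datasets API ("AmazonScience/massive" is the Hub id used in test_massive.py below):

from datasets import load_dataset

# Config names follow the locale codes listed above.
ca_dataset = load_dataset("AmazonScience/massive", "ca-ES")
print(ca_dataset["train"][0])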
massive.py
CHANGED
@@ -441,16 +441,22 @@ licenses.
 
 Creative Commons may be contacted at creativecommons.org.
 """
+# version 1.0
+_URL0 = "https://amazon-massive-nlu-dataset.s3.amazonaws.com/amazon-massive-dataset-1.0.tar.gz"
+# version 1.1
+_URL1 = "https://amazon-massive-nlu-dataset.s3.amazonaws.com/amazon-massive-dataset-1.1.tar.gz"
 
-_URL = "https://amazon-massive-nlu-dataset.s3.amazonaws.com/amazon-massive-dataset-1.0.tar.gz"
-
-_LANGUAGES = ['af-ZA', 'am-ET', 'ar-SA', 'az-AZ', 'bn-BD', 'cy-GB', 'da-DK', 'de-DE', 'el-GR', 'en-US',
+# version 1.0
+_LANGUAGES0 = ['af-ZA', 'am-ET', 'ar-SA', 'az-AZ', 'bn-BD', 'cy-GB', 'da-DK', 'de-DE', 'el-GR', 'en-US',
               'es-ES', 'fa-IR', 'fi-FI', 'fr-FR', 'he-IL', 'hi-IN', 'hu-HU', 'hy-AM', 'id-ID', 'is-IS',
               'it-IT', 'ja-JP', 'jv-ID', 'ka-GE', 'km-KH', 'kn-IN', 'ko-KR', 'lv-LV', 'ml-IN', 'mn-MN',
               'ms-MY', 'my-MM', 'nb-NO', 'nl-NL', 'pl-PL', 'pt-PT', 'ro-RO', 'ru-RU', 'sl-SL', 'sq-AL',
               'sv-SE', 'sw-KE', 'ta-IN', 'te-IN', 'th-TH', 'tl-PH', 'tr-TR', 'ur-PK', 'vi-VN', 'zh-CN',
               'zh-TW']
 
+# version 1.1 ('ca-ES' added)
+_LANGUAGES1 = sorted(_LANGUAGES0 + ['ca-ES'])
+
 _SCENARIOS = ['social', 'transport', 'calendar', 'play', 'news', 'datetime', 'recommendation', 'email',
               'iot', 'general', 'audio', 'lists', 'qa', 'cooking', 'takeaway', 'music', 'alarm', 'weather']
 
@@ -470,22 +476,48 @@ _INTENTS = ['datetime_query', 'iot_hue_lightchange', 'transport_ticket', 'takeaw
 
 _ALL = "all"
 
+
+class MassiveConfig(datasets.BuilderConfig):
+    """BuilderConfig for MASSIVE."""
+
+    def __init__(self, dataset_version=None, *args, **kwargs):
+        """BuilderConfig for MASSIVE.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(MassiveConfig, self).__init__(*args, **kwargs)
+        self.dataset_version = dataset_version if dataset_version else "1.0"
+        self.data_url = _URL0 if self.dataset_version == "1.0" else _URL1
+
+
 class MASSIVE(datasets.GeneratorBasedBuilder):
     """MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
 
+    # All individual locale datasets are served from the latest version.
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
+        MassiveConfig(
             name = name,
+            dataset_version = '1.1',
             version = datasets.Version("1.0.0"),
-            description = f"The MASSIVE corpora for {name}",
-        ) for name in _LANGUAGES
-    ]
-
-    BUILDER_CONFIGS.append(
-        datasets.BuilderConfig(
-            name = _ALL,
-            version = datasets.Version("1.0.0"),
-            description = f"The MASSIVE corpora for entire corpus",
+            description = f"The MASSIVE v1.1 corpora for {name}",
+        ) for name in _LANGUAGES1
+    ]
+    # Version 1.0
+    BUILDER_CONFIGS.append(MassiveConfig(
+        name = _ALL,
+        dataset_version = '1.0',
+        version = datasets.Version("1.0.0"),
+        description = f"The MASSIVE v1.0 corpora for the entire corpus",
+        )
+    )
+    # Version 1.1
+    BUILDER_CONFIGS.append(MassiveConfig(
+        name = _ALL + '_1.1',  # '_1.1' is appended to 'all' in order to get the latest version.
+        dataset_version = '1.1',
+        version = datasets.Version("1.0.0"),
+        description = f"The MASSIVE v1.1 corpora for the entire corpus",
+        )
+    )
 
     DEFAULT_CONFIG_NAME = _ALL
 
@@ -524,8 +556,7 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
 
-        archive = dl_manager.download(_URL)
-
+        archive = dl_manager.download(self.config.data_url)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -557,8 +588,10 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
 
         key_ = 0
 
-        if lang == "all":
-            lang = _LANGUAGES.copy()
+        if lang == "all_1.1":
+            lang = _LANGUAGES1.copy()
+        elif lang == "all":
+            lang = _LANGUAGES0.copy()
         else:
             lang = [lang]
 
@@ -566,7 +599,7 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
 
         for path, f in files:
 
-            l = path.split("
+            l = path.split("/")[-1].split(".")[0]
 
             if not lang:
                 break
test_massive.py
CHANGED
@@ -2,6 +2,17 @@ from datasets import load_dataset
 
 source = "AmazonScience/massive"
 
+# Individual locales are downloaded from version 1.1, which has Catalan (ca-ES) added.
 dataset = load_dataset(source, "en-US", download_mode="force_redownload")
 print(dataset)
 print(dataset["train"][0])
+
+# Using version 1.1, which has Catalan (ca-ES) added ("all_1.1", as registered via _ALL + '_1.1').
+dataset = load_dataset(source, "all_1.1", download_mode="force_redownload")
+print(dataset)
+print(dataset["train"][0])
+
+# Using version 1.0
+dataset = load_dataset(source, "all", download_mode="force_redownload")
+print(dataset)
+print(dataset["train"][0])