# coding=utf-8
"""MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
MASSIVE is a parallel dataset of > 1M utterances across 51 languages with annotations
for the Natural Language Understanding tasks of intent prediction and slot annotation.
Utterances span 60 intents and include 55 slot types. MASSIVE was created by localizing
the SLURP dataset, composed of general Intelligent Voice Assistant single-shot interactions.
"""
_URL = "https://amazon-massive-nlu-dataset.s3.amazonaws.com/amazon-massive-dataset-1.0.tar.gz"
_LANGUAGES = [
"af-ZA",
"am-ET",
"ar-SA",
"az-AZ",
"bn-BD",
"cy-GB",
"da-DK",
"de-DE",
"el-GR",
"en-US",
"es-ES",
"fa-IR",
"fi-FI",
"fr-FR",
"he-IL",
"hi-IN",
"hu-HU",
"hy-AM",
"id-ID",
"is-IS",
"it-IT",
"ja-JP",
"jv-ID",
"ka-GE",
"km-KH",
"kn-IN",
"ko-KR",
"lv-LV",
"ml-IN",
"mn-MN",
"ms-MY",
"my-MM",
"nb-NO",
"nl-NL",
"pl-PL",
"pt-PT",
"ro-RO",
"ru-RU",
"sl-SL",
"sq-AL",
"sv-SE",
"sw-KE",
"ta-IN",
"te-IN",
"th-TH",
"tl-PH",
"tr-TR",
"ur-PK",
"vi-VN",
"zh-CN",
"zh-TW",
]
_SCENARIOS = [
"social",
"transport",
"calendar",
"play",
"news",
"datetime",
"recommendation",
"email",
"iot",
"general",
"audio",
"lists",
"qa",
"cooking",
"takeaway",
"music",
"alarm",
"weather",
]
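# Intent labels below follow a "scenario_action" naming convention, so the
# scenario of a row can also be read off the prefix of its intent label
# (e.g. "datetime_query" belongs to the "datetime" scenario).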
_INTENTS = [
"datetime_query",
"iot_hue_lightchange",
"transport_ticket",
"takeaway_query",
"qa_stock",
"general_greet",
"recommendation_events",
"music_dislikeness",
"iot_wemo_off",
"cooking_recipe",
"qa_currency",
"transport_traffic",
"general_quirky",
"weather_query",
"audio_volume_up",
"email_addcontact",
"takeaway_order",
"email_querycontact",
"iot_hue_lightup",
"recommendation_locations",
"play_audiobook",
"lists_createoradd",
"news_query",
"alarm_query",
"iot_wemo_on",
"general_joke",
"qa_definition",
"social_query",
"music_settings",
"audio_volume_other",
"calendar_remove",
"iot_hue_lightdim",
"calendar_query",
"email_sendemail",
"iot_cleaning",
"audio_volume_down",
"play_radio",
"cooking_query",
"datetime_convert",
"qa_maths",
"iot_hue_lightoff",
"iot_hue_lighton",
"transport_query",
"music_likeness",
"email_query",
"play_music",
"audio_volume_mute",
"social_post",
"alarm_set",
"qa_factoid",
"calendar_set",
"play_game",
"alarm_remove",
"lists_remove",
"transport_taxi",
"recommendation_movies",
"iot_coffee",
"music_query",
"play_podcasts",
"lists_query",
]
class MASSIVE(datasets.GeneratorBasedBuilder):
"""MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name=name,
version=datasets.Version("1.0.0"),
description=f"The MASSIVE corpora for {name}",
)
for name in _LANGUAGES
]
DEFAULT_CONFIG_NAME = "en-US"
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"locale": datasets.Value("string"),
"partition": datasets.Value("string"),
"scenario": datasets.features.ClassLabel(names=_SCENARIOS),
"intent": datasets.features.ClassLabel(names=_INTENTS),
"utt": datasets.Value("string"),
"annot_utt": datasets.Value("string"),
"worker_id": datasets.Value("string"),
"slot_method": datasets.Sequence(
{
"slot": datasets.Value("string"),
"method": datasets.Value("string"),
}
),
"judgments": datasets.Sequence(
{
"worker_id": datasets.Value("string"),
"intent_score": datasets.Value("int8"),
"slots_score": datasets.Value("int8"),
"grammar_score": datasets.Value("int8"),
"spelling_score": datasets.Value("int8"),
"language_identification": datasets.Value("string"),
}
),
},
),
supervised_keys=None,
homepage="https://github.com/alexa/massive",
citation="_CITATION",
license="_LICENSE",
)
    def _split_generators(self, dl_manager):
        # Stream the archive rather than extracting it. `iter_archive` returns
        # a one-shot generator, so each split gets its own fresh iterator;
        # sharing a single iterator would leave the dev and test splits empty.
        archive_path = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "train",
                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "dev",
                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "test",
                    "lang": self.config.name,
                },
            ),
        ]
    def _generate_examples(self, files, split, lang):
        filepath = "1.0/data/" + lang + ".jsonl"
        logger.info("⏳ Generating examples from = %s", filepath)
        for path, f in files:
            if path != filepath:
                continue
            key_ = 0
            # Each .jsonl file holds every partition for one locale, so keep
            # only the rows that belong to the requested split. Iterating the
            # file object directly streams the lines; the archive iterator
            # manages the file handle, so it is not closed here.
            for line in f:
                data = json.loads(line)
                if data["partition"] != split:
                    continue
                # Slot method: how each slot value was produced; absent for
                # some rows, in which case it defaults to an empty list.
                if "slot_method" in data:
                    slot_method = [
                        {
                            "slot": s["slot"],
                            "method": s["method"],
                        }
                        for s in data["slot_method"]
                    ]
                else:
                    slot_method = []
                # Judgments: per-worker quality annotations; likewise optional.
                if "judgments" in data:
                    judgments = [
                        {
                            "worker_id": j["worker_id"],
                            "intent_score": j["intent_score"],
                            "slots_score": j["slots_score"],
                            "grammar_score": j["grammar_score"],
                            "spelling_score": j["spelling_score"],
                            "language_identification": j["language_identification"],
                        }
                        for j in data["judgments"]
                    ]
                else:
                    judgments = []
                yield key_, {
                    "id": data["id"],
                    "locale": data["locale"],
                    "partition": data["partition"],
                    "scenario": data["scenario"],
                    "intent": data["intent"],
                    "utt": data["utt"],
                    "annot_utt": data["annot_utt"],
                    "worker_id": data["worker_id"],
                    "slot_method": slot_method,
                    "judgments": judgments,
                }
                key_ += 1
            # The archive holds one file per locale; stop once it is done.
            break
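

if __name__ == "__main__":
    # Minimal smoke test: a sketch assuming this file is saved locally as
    # `test111.py` and run with a `datasets` version that still supports
    # script-based loading. The default config is "en-US"; any locale in
    # _LANGUAGES (e.g. "fr-FR") can be requested instead.
    from datasets import load_dataset

    massive = load_dataset("test111.py", "fr-FR")
    print(massive)
    print(massive["train"][0])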