Update files from the datasets library (from 1.9.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.9.0
- dataset_infos.json +0 -0
- xtreme.py +125 -44
dataset_infos.json
CHANGED
The diff for this file is too large to render.
See raw diff
xtreme.py
CHANGED
@@ -220,10 +220,8 @@ _DESCRIPTIONS = {
    "tatoeba": textwrap.dedent(
        """\
his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.
-
For each languages, we have selected 1000 English sentences and their translations, if available. Please check
this paper for a description of the languages, their families and scripts as well as baseline results.
-
Please note that the English sentences are not identical for all language pairs. This means that the results are
not directly comparable across languages. In particular, the sentences tend to have less variety for several
low-resource languages, e.g. "Tom needed water", "Tom needs water", "Tom is getting water", ...

@@ -352,15 +350,52 @@ _CITATIONS = {
}

_TEXT_FEATURES = {
+    "XNLI": {
+        "language": "language",
+        "sentence1": "sentence1",
+        "sentence2": "sentence2",
+    },
+    "tydiqa": {
+        "id": "id",
+        "title": "title",
+        "context": "context",
+        "question": "question",
+        "answers": "answers",
+    },
+    "XQuAD": {
+        "id": "id",
+        "context": "context",
+        "question": "question",
+        "answers": "answers",
+    },
+    "MLQA": {
+        "id": "id",
+        "title": "title",
+        "context": "context",
+        "question": "question",
+        "answers": "answers",
+    },
+    "tatoeba": {
+        "source_sentence": "",
+        "target_sentence": "",
+        "source_lang": "",
+        "target_lang": "",
+    },
+    "bucc18": {
+        "source_sentence": "",
+        "target_sentence": "",
+        "source_lang": "",
+        "target_lang": "",
+    },
    "PAWS-X": {"sentence1": "sentence1", "sentence2": "sentence2"},
+    "udpos": {"tokens": "", "pos_tags": ""},
+    "SQuAD": {
+        "id": "id",
+        "title": "title",
+        "context": "context",
+        "question": "question",
+        "answers": "answers",
+    },
    "PAN-X": {"tokens": "", "ner_tags": "", "lang": ""},
}
_DATA_URLS = {

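The maps above only name the text columns of each sub-dataset; in the builder they are turned into plain string features (see the features = {...} context line in a later hunk). A minimal sketch of that pattern, using one made-up entry for illustration:

import datasets

# Hypothetical stand-in for a single _TEXT_FEATURES entry.
text_features = {"sentence1": "sentence1", "sentence2": "sentence2"}

# Every listed text field becomes a string feature, as in the builder.
features = datasets.Features({name: datasets.Value("string") for name in text_features})
print(features)
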
@@ -395,7 +430,6 @@ class XtremeConfig(datasets.BuilderConfig):

    def __init__(self, data_url, citation, url, text_features, **kwargs):
        """
-
        Args:
            text_features: `dict[string, string]`, map from the name of the feature
                dict for each text field to the name of the column in the tsv file

@@ -432,7 +466,10 @@ class Xtreme(datasets.GeneratorBasedBuilder):
        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
        if "answers" in features.keys():
            features["answers"] = datasets.features.Sequence(
+                {
+                    "answer_start": datasets.Value("int32"),
+                    "text": datasets.Value("string"),
+                }
            )
        if self.config.name.startswith("PAWS-X"):
            features["label"] = datasets.Value("string")

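Wrapping the dict in datasets.features.Sequence gives the usual SQuAD-style answers feature: each example stores parallel lists of character offsets and answer strings. A minimal sketch (the example values are made up):

import datasets

features = datasets.Features(
    {
        "question": datasets.Value("string"),
        "answers": datasets.features.Sequence(
            {
                "answer_start": datasets.Value("int32"),
                "text": datasets.Value("string"),
            }
        ),
    }
)

# One example: one entry per annotated answer, stored as parallel lists.
example = {
    "question": "Where do the sentences come from?",
    "answers": {"answer_start": [17, 17], "text": ["the Tatoeba corpus", "Tatoeba"]},
}
print(features.encode_example(example))
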
@@ -442,27 +479,29 @@ class Xtreme(datasets.GeneratorBasedBuilder):
        if self.config.name.startswith("udpos"):
            features = datasets.Features(
                {
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "pos_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "ADJ",
+                                "ADP",
+                                "ADV",
+                                "AUX",
+                                "CCONJ",
+                                "DET",
+                                "INTJ",
+                                "NOUN",
+                                "NUM",
+                                "PART",
+                                "PRON",
+                                "PROPN",
+                                "PUNCT",
+                                "SCONJ",
+                                "SYM",
+                                "VERB",
+                                "X",
+                            ]
+                        )
                    ),
                }
            )

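Using Sequence(ClassLabel(...)) stores the POS tags as integer ids while keeping the tag names in the dataset metadata. A small sketch of how such a feature encodes string tags (shortened tag list and made-up sentence):

import datasets

pos_feature = datasets.Sequence(
    datasets.features.ClassLabel(names=["ADJ", "ADP", "ADV", "NOUN", "PUNCT", "VERB"])
)
features = datasets.Features(
    {"tokens": datasets.Sequence(datasets.Value("string")), "pos_tags": pos_feature}
)

example = {"tokens": ["Tom", "needs", "water", "."], "pos_tags": ["NOUN", "VERB", "NOUN", "PUNCT"]}
encoded = features.encode_example(example)
print(encoded["pos_tags"])  # integer class ids
print(pos_feature.feature.int2str(encoded["pos_tags"]))  # back to the tag names
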
@@ -535,10 +574,12 @@ class Xtreme(datasets.GeneratorBasedBuilder):
            data_dir = os.path.join(dl_dir, "XNLI-1.0")
            return [
                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={"filepath": os.path.join(data_dir, "xnli.test.tsv")},
                ),
                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={"filepath": os.path.join(data_dir, "xnli.dev.tsv")},
                ),
            ]

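Each datasets.SplitGenerator simply pairs a split name with the keyword arguments that will later be passed to _generate_examples; the two XNLI splits reuse the same generator and differ only in filepath. A stripped-down sketch of that wiring with a toy builder (the class and paths are hypothetical):

import datasets

class ToyBuilder(datasets.GeneratorBasedBuilder):  # hypothetical minimal builder
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"sentence1": datasets.Value("string")})
        )

    def _split_generators(self, dl_manager):
        # In xtreme.py these paths point into the extracted XNLI-1.0 archive.
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": "xnli.test.tsv"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": "xnli.dev.tsv"}),
        ]

    def _generate_examples(self, filepath):
        # The gen_kwargs above arrive here as keyword arguments.
        yield 0, {"sentence1": "placeholder row from " + filepath}
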
@@ -625,10 +666,16 @@ class Xtreme(datasets.GeneratorBasedBuilder):
        if self.config.name.startswith("bucc18"):
            lang = self.config.name.split(".")[1]
            bucc18_dl_test_dir = dl_manager.download_and_extract(
+                os.path.join(
+                    self.config.data_url,
+                    "bucc2018-{}-en.training-gold.tar.bz2".format(lang),
+                )
            )
            bucc18_dl_dev_dir = dl_manager.download_and_extract(
+                os.path.join(
+                    self.config.data_url,
+                    "bucc2018-{}-en.sample-gold.tar.bz2".format(lang),
+                )
            )
            return [
                datasets.SplitGenerator(

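For bucc18 the target language is encoded in the config name (for example bucc18.de), and the archive names are built from it before being passed to dl_manager.download_and_extract. A small sketch of just that name handling (the base URL is a placeholder, not the real mirror):

import os

config_name = "bucc18.de"                   # config names look like "bucc18.<lang>"
data_url = "https://example.com/bucc2018"   # placeholder base URL

lang = config_name.split(".")[1]
test_archive = os.path.join(data_url, "bucc2018-{}-en.training-gold.tar.bz2".format(lang))
dev_archive = os.path.join(data_url, "bucc2018-{}-en.sample-gold.tar.bz2".format(lang))
print(test_archive)
print(dev_archive)
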
@@ -742,9 +789,13 @@ class Xtreme(datasets.GeneratorBasedBuilder):
            downloaded_files = dl_manager.download_and_extract(urls_to_download)

            return [
-                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"filepath": downloaded_files["train"]},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={"filepath": downloaded_files["dev"]},
                ),
            ]

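Here downloaded_files is the dict returned by dl_manager.download_and_extract, keyed like the URL dict that was passed in, so the "train" and "dev" entries map directly onto the TRAIN and VALIDATION splits. A sketch of the shape of that mapping (the paths are placeholders):

import datasets

# download_and_extract(dict) returns a dict with the same keys, pointing at local files.
downloaded_files = {"train": "/tmp/train.json", "dev": "/tmp/dev.json"}  # placeholder paths

splits = [
    datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
    datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
]
print(splits[0].gen_kwargs, splits[1].gen_kwargs)
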
@@ -796,7 +847,10 @@ class Xtreme(datasets.GeneratorBasedBuilder):
                                "context": context,
                                "question": question,
                                "id": id_,
+                                "answers": {
+                                    "answer_start": answer_starts,
+                                    "text": answers,
+                                },
                            }
        if self.config.name == "XNLI":
            with open(filepath, encoding="utf-8") as f:

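The answer_starts and answers lists used here are built from the SQuAD-format "qas" entries earlier in the loop (not part of this hunk), presumably along these lines, shown on a made-up record:

# One SQuAD/TyDiQA-style question entry (made-up values).
qa = {
    "id": "q1",
    "question": "What is the corpus called?",
    "answers": [
        {"answer_start": 34, "text": "Tatoeba"},
        {"answer_start": 30, "text": "the Tatoeba corpus"},
    ],
}

answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
print(answer_starts, answers)
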
@@ -814,7 +868,11 @@ class Xtreme(datasets.GeneratorBasedBuilder):
                next(data)  # skip header
                for id_, row in enumerate(data):
                    if len(row) == 4:
+                        yield id_, {
+                            "sentence1": row[1],
+                            "sentence2": row[2],
+                            "label": row[3],
+                        }
        if self.config.name.startswith("XQuAD"):
            with open(filepath, encoding="utf-8") as f:
                xquad = json.load(f)

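The XNLI file is a tab-separated table read with csv.QUOTE_NONE; only rows with exactly four fields are used, with the columns after the (presumed) language column holding sentence1, sentence2 and the label. A self-contained sketch of the same parsing on an in-memory TSV:

import csv
import io

# Made-up stand-in for xnli.test.tsv: language, sentence1, sentence2, label.
tsv = "lang\tsentence1\tsentence2\tgold_label\nen\tTom needs water\tTom is thirsty\tentailment\n"

data = csv.reader(io.StringIO(tsv), delimiter="\t", quoting=csv.QUOTE_NONE)
next(data)  # skip header
for id_, row in enumerate(data):
    if len(row) == 4:
        print(id_, {"sentence1": row[1], "sentence2": row[2], "label": row[3]})
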
@@ -834,7 +892,10 @@ class Xtreme(datasets.GeneratorBasedBuilder):
                            "context": context,
                            "question": question,
                            "id": id_,
+                            "answers": {
+                                "answer_start": answer_starts,
+                                "text": answers,
+                            },
                        }
        if self.config.name.startswith("bucc18"):
            files = sorted(os.listdir(filepath))

@@ -900,9 +961,19 @@ class Xtreme(datasets.GeneratorBasedBuilder):
            for id_file, file in enumerate(filepath):
                with open(file, encoding="utf-8") as f:
                    data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                    tokens = []
+                    pos_tags = []
                    for id_row, row in enumerate(data):
                        if len(row) >= 10 and row[1] != "_" and row[3] != "_":
+                            tokens.append(row[1])
+                            pos_tags.append(row[3])
+                        if len(row) == 0 and len(tokens) > 0:
+                            yield str(id_file) + "_" + str(id_row), {
+                                "tokens": tokens,
+                                "pos_tags": pos_tags,
+                            }
+                            tokens = []
+                            pos_tags = []
        if self.config.name.startswith("PAN-X"):
            guid_index = 1
            with open(filepath, encoding="utf-8") as f:

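Because csv.reader yields an empty list for a blank line, the len(row) == 0 test marks a sentence boundary in the CoNLL-U file: tokens and POS tags are accumulated until the blank line and then flushed as one example. The same logic on a tiny in-memory fragment (the two-sentence sample is made up, and the flush only fires on a blank line, which is why the sample ends with one):

import csv
import io

# Two minimal CoNLL-U-style sentences separated by blank lines; columns 2 (FORM) and 4 (UPOS) are used.
conllu = (
    "1\tTom\t_\tPROPN\t_\t_\t_\t_\t_\t_\n"
    "2\tneeds\t_\tVERB\t_\t_\t_\t_\t_\t_\n"
    "\n"
    "1\twater\t_\tNOUN\t_\t_\t_\t_\t_\t_\n"
    "\n"
)

data = csv.reader(io.StringIO(conllu), delimiter="\t", quoting=csv.QUOTE_NONE)
tokens, pos_tags = [], []
for id_row, row in enumerate(data):
    if len(row) >= 10 and row[1] != "_" and row[3] != "_":
        tokens.append(row[1])
        pos_tags.append(row[3])
    if len(row) == 0 and len(tokens) > 0:
        print({"tokens": tokens, "pos_tags": pos_tags})
        tokens, pos_tags = [], []
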
@@ -912,7 +983,11 @@ class Xtreme(datasets.GeneratorBasedBuilder):
                for line in f:
                    if line == "" or line == "\n":
                        if tokens:
+                            yield guid_index, {
+                                "tokens": tokens,
+                                "ner_tags": ner_tags,
+                                "langs": langs,
+                            }
                            guid_index += 1
                            tokens = []
                            ner_tags = []

@@ -928,3 +1003,9 @@ class Xtreme(datasets.GeneratorBasedBuilder):
                        else:
                            # examples have no label in test set
                            ner_tags.append("O")
+                if tokens:
+                    yield guid_index, {
+                        "tokens": tokens,
+                        "ner_tags": ner_tags,
+                        "langs": langs,
+                    }