Datasets: head_qa
Update files from the datasets library (from 1.17.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.17.0
- README.md +2 -2
- dataset_infos.json +1 -1
- dummy/en/1.1.0/dummy_data.zip +2 -2
- dummy/es/1.1.0/dummy_data.zip +2 -2
- head_qa.py +12 -13
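
With this update the `image` column is exposed as an `Image` feature, so loading the dataset yields decoded images. For reference, a minimal usage sketch (assumes `datasets>=1.17.0` and Pillow installed):

```python
from datasets import load_dataset

# Load the Spanish configuration; an "en" configuration is also available.
dataset = load_dataset("head_qa", "es", split="train")

example = dataset[0]
print(example["qtext"])
# With the new Image feature this is a decoded PIL.Image.Image,
# or None for questions that have no associated image.
print(example["image"])
```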
README.md
CHANGED
@@ -128,7 +128,7 @@ An example from the HEAD-QA dataset looks as follows:
         'atext': 'Presentan un periodo refractario.'
     }],
     'ra': '3',
-    'image':
+    'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=675x538 at 0x1B42B6A1668>,
     'name': 'Cuaderno_2013_1_B',
     'year': '2013'
 }
@@ -143,7 +143,7 @@ An example from the HEAD-QA dataset looks as follows:
 - `aid`: answer identifier (int)
 - `atext`: answer text
 - `ra`: `aid` of the right answer (int)
-- `image`: optional
+- `image`: (optional) a `PIL.Image.Image` object containing the image. Note that when the image column is accessed (`dataset[0]["image"]`), the image file is automatically decoded. Decoding a large number of image files can take a significant amount of time, so it is important to query the sample index before the `"image"` column, *i.e.* `dataset[0]["image"]` should **always** be preferred over `dataset["image"][0]`.
 - `name`: name of the exam from which the question was extracted
 - `year`: year in which the exam took place
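
The row-first access rule in the README note above can be sanity-checked directly. A hedged sketch (timings are illustrative, not measured):

```python
import time
from datasets import load_dataset

dataset = load_dataset("head_qa", "es", split="train")

# Row-first: decodes only the image belonging to example 0.
t0 = time.perf_counter()
_ = dataset[0]["image"]
print("dataset[0]['image']:", time.perf_counter() - t0, "s")

# Column-first: materializes the whole column, decoding every image
# in the split before indexing, which is why it should be avoided.
t0 = time.perf_counter()
_ = dataset["image"][0]
print("dataset['image'][0]:", time.perf_counter() - t0, "s")
```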
dataset_infos.json
CHANGED
@@ -1 +1 @@
-{"es": {"description": "HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams to access a specialized position in the\nSpanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio\nde Sanidad, Consumo y Bienestar Social.\n\nThe dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology.\n", "citation": "@inproceedings{vilares-gomez-rodriguez-2019-head,\n title = \"{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning\",\n author = \"Vilares, David and\n G{'o}mez-Rodr{'i}guez, Carlos\",\n booktitle = \"Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics\",\n month = jul,\n year = \"2019\",\n address = \"Florence, Italy\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P19-1092\",\n doi = \"10.18653/v1/P19-1092\",\n pages = \"960--966\",\n abstract = \"We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.\",\n}\n", "homepage": "https://aghie.github.io/head-qa/", "license": "MIT License", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "year": {"dtype": "string", "id": null, "_type": "Value"}, "category": {"dtype": "string", "id": null, "_type": "Value"}, "qid": {"dtype": "int32", "id": null, "_type": "Value"}, "qtext": {"dtype": "string", "id": null, "_type": "Value"}, "ra": {"dtype": "int32", "id": null, "_type": "Value"}, "image": {"
+{"es": {"description": "HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams to access a specialized position in the\nSpanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio\nde Sanidad, Consumo y Bienestar Social.\n\nThe dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology.\n", "citation": "@inproceedings{vilares-gomez-rodriguez-2019-head,\n title = \"{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning\",\n author = \"Vilares, David and\n G{'o}mez-Rodr{'i}guez, Carlos\",\n booktitle = \"Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics\",\n month = jul,\n year = \"2019\",\n address = \"Florence, Italy\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P19-1092\",\n doi = \"10.18653/v1/P19-1092\",\n pages = \"960--966\",\n abstract = \"We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.\",\n}\n", "homepage": "https://aghie.github.io/head-qa/", "license": "MIT License", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "year": {"dtype": "string", "id": null, "_type": "Value"}, "category": {"dtype": "string", "id": null, "_type": "Value"}, "qid": {"dtype": "int32", "id": null, "_type": "Value"}, "qtext": {"dtype": "string", "id": null, "_type": "Value"}, "ra": {"dtype": "int32", "id": null, "_type": "Value"}, "image": {"id": null, "_type": "Image"}, "answers": [{"aid": {"dtype": "int32", "id": null, "_type": "Value"}, "atext": {"dtype": "string", "id": null, "_type": "Value"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "head_qa", "config_name": "es", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1227988, "num_examples": 2657, "dataset_name": "head_qa"}, "test": {"name": "test", "num_bytes": 1202592, "num_examples": 2742, "dataset_name": "head_qa"}, "validation": {"name": "validation", "num_bytes": 572652, "num_examples": 1366, "dataset_name": "head_qa"}}, "download_checksums": {"https://drive.google.com/u/0/uc?export=download&id=1a_95N5zQQoUCq8IBNVZgziHbeM-QxG2t": {"num_bytes": 79365502, "checksum": "6ec29a3f55153d167f0bdf05395558919ba0b1df9c63e79ffceda2a09884ad8b"}}, "download_size": 79365502, "post_processing_size": null, "dataset_size": 3003232, "size_in_bytes": 82368734}, "en": {"description": "HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams to access a specialized position in the\nSpanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio\nde Sanidad, Consumo y Bienestar Social.\n\nThe dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology.\n", "citation": "@inproceedings{vilares-gomez-rodriguez-2019-head,\n title = \"{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning\",\n author = \"Vilares, David and\n G{'o}mez-Rodr{'i}guez, Carlos\",\n booktitle = \"Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics\",\n month = jul,\n year = \"2019\",\n address = \"Florence, Italy\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P19-1092\",\n doi = \"10.18653/v1/P19-1092\",\n pages = \"960--966\",\n abstract = \"We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.\",\n}\n", "homepage": "https://aghie.github.io/head-qa/", "license": "MIT License", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "year": {"dtype": "string", "id": null, "_type": "Value"}, "category": {"dtype": "string", "id": null, "_type": "Value"}, "qid": {"dtype": "int32", "id": null, "_type": "Value"}, "qtext": {"dtype": "string", "id": null, "_type": "Value"}, "ra": {"dtype": "int32", "id": null, "_type": "Value"}, "image": {"id": null, "_type": "Image"}, "answers": [{"aid": {"dtype": "int32", "id": null, "_type": "Value"}, "atext": {"dtype": "string", "id": null, "_type": "Value"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "head_qa", "config_name": "en", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1155118, "num_examples": 2657, "dataset_name": "head_qa"}, "test": {"name": "test", "num_bytes": 1130122, "num_examples": 2742, "dataset_name": "head_qa"}, "validation": {"name": "validation", "num_bytes": 539190, "num_examples": 1366, "dataset_name": "head_qa"}}, "download_checksums": {"https://drive.google.com/u/0/uc?export=download&id=1a_95N5zQQoUCq8IBNVZgziHbeM-QxG2t": {"num_bytes": 79365502, "checksum": "6ec29a3f55153d167f0bdf05395558919ba0b1df9c63e79ffceda2a09884ad8b"}}, "download_size": 79365502, "post_processing_size": null, "dataset_size": 2824430, "size_in_bytes": 82189932}}
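
The only schema change in the metadata above is the `image` feature, now serialized as `{"id": null, "_type": "Image"}`. A small sketch of how that serialization round-trips to a feature object (the feature subset is trimmed for brevity):

```python
from datasets import Features

# Deserialize a fragment of the dataset_infos.json feature schema;
# the {"_type": "Image"} entry becomes a datasets.Image feature.
features = Features.from_dict(
    {
        "qtext": {"dtype": "string", "id": None, "_type": "Value"},
        "image": {"id": None, "_type": "Image"},
    }
)
print(features["image"])  # an Image feature instance
```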
dummy/en/1.1.0/dummy_data.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:273e9c051b7deafeef2b3d21e632cb681e37bd857e2abf75d20f5465ce2fa18e
+size 2532
dummy/es/1.1.0/dummy_data.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9da2da566d4d95069bf0d360a78c882762c171ecc42f14895bcf00ed6e1d4526
+size 2530
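
Both dummy archives are Git LFS pointer files: `oid` records the SHA-256 of the real zip and `size` its byte length. A sketch for checking a materialized file against its pointer (path and expected digest taken from the diff above):

```python
import hashlib

def lfs_oid(path: str) -> str:
    """SHA-256 digest as recorded in the `oid sha256:` line of an LFS pointer."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

assert lfs_oid("dummy/en/1.1.0/dummy_data.zip") == (
    "273e9c051b7deafeef2b3d21e632cb681e37bd857e2abf75d20f5465ce2fa18e"
)
```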
head_qa.py
CHANGED
@@ -50,10 +50,7 @@ _HOMEPAGE = "https://aghie.github.io/head-qa/"
 
 _LICENSE = "MIT License"
 
-_URLS = {
-    "es": "https://drive.google.com/uc?export=download&id=1dUIqVwvoZAtbX_-z5axCoe97XNcFo1No",
-    "en": "https://drive.google.com/uc?export=download&id=1phryJg4FjCFkn0mSCqIOP2-FscAeKGV0",
-}
+_URL = "https://drive.google.com/u/0/uc?export=download&id=1a_95N5zQQoUCq8IBNVZgziHbeM-QxG2t"
 
 _DIRS = {"es": "HEAD", "en": "HEAD_EN"}
 
@@ -81,7 +78,7 @@ class HeadQA(datasets.GeneratorBasedBuilder):
                     "qid": datasets.Value("int32"),
                     "qtext": datasets.Value("string"),
                     "ra": datasets.Value("int32"),
-                    "image": datasets.
+                    "image": datasets.Image(),
                     "answers": [
                         {
                             "aid": datasets.Value("int32"),
@@ -98,25 +95,27 @@ class HeadQA(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(
+        data_dir = dl_manager.download_and_extract(_URL)
 
         dir = _DIRS[self.config.name]
-
+        data_lang_dir = os.path.join(data_dir, dir)
 
         return [
             datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_lang_dir, f"train_{dir}.json")},
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.TEST,
+                name=datasets.Split.TEST,
+                gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_lang_dir, f"test_{dir}.json")},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": os.path.join(
+                gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_lang_dir, f"dev_{dir}.json")},
             ),
         ]
 
-    def _generate_examples(self, filepath):
+    def _generate_examples(self, data_dir, filepath):
         """Yields examples."""
         with open(filepath, encoding="utf-8") as f:
             head_qa = json.load(f)
@@ -129,7 +128,7 @@ class HeadQA(datasets.GeneratorBasedBuilder):
             qid = int(question["qid"].strip())
             qtext = question["qtext"].strip()
             ra = int(question["ra"].strip())
-
+            image_path = question["image"].strip()
 
             aids = [answer["aid"] for answer in question["answers"]]
             atexts = [answer["atext"].strip() for answer in question["answers"]]
@@ -143,6 +142,6 @@ class HeadQA(datasets.GeneratorBasedBuilder):
                 "qid": qid,
                 "qtext": qtext,
                 "ra": ra,
-                "image":
+                "image": os.path.join(data_dir, image_path) if image_path else None,
                 "answers": answers,
             }
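
The loader change works because `datasets.Image()` accepts a file path or `None` from `_generate_examples` and defers decoding until the example is accessed. A self-contained sketch of the same mechanism outside the builder (row contents and paths are made up for illustration):

```python
from datasets import Dataset, Features, Image, Value

# The "image" column holds file paths or None, exactly the shape
# the updated _generate_examples yields.
rows = {
    "qtext": ["Question with a figure", "Question without one"],
    "image": ["Cuaderno_2013_1_B/q1.png", None],  # placeholder path
}

features = Features({"qtext": Value("string"), "image": Image()})
ds = Dataset.from_dict(rows, features=features)

# Decoding happens lazily on access; a None entry stays None.
print(ds[1]["image"])  # -> None
# ds[0]["image"] would open the placeholder path with PIL,
# which only works if the file actually exists.
```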