"""PathVQA: 30000+ Questions for Medical Visual Question Answering""" import os import pandas import datasets _CITATION = """\ @article{he2020pathvqa, title={PathVQA: 30000+ Questions for Medical Visual Question Answering}, author={He, Xuehai and Zhang, Yichen and Mou, Luntian and Xing, Eric and Xie, Pengtao}, journal={arXiv preprint arXiv:2003.10286}, year={2020} } """ _DESCRIPTION = """\ PathVQA is a dataset of question-answer pairs on pathology images. The dataset is intended to be used for training and testing Medical Visual Question Answering (VQA) systems. The questions contained in the dataset are similar to those in the American Board of Pathology (ABP) test. The dataset includes both open-ended questions and binary "yes/no" questions. The dataset is built from two publicly-available pathology textbooks: "Textbook of Pathology" and "Basic Pathology", and a publicly-available digital library: "Pathology Education Informational Resource" (PEIR). The copyrights of images and captions belong to the publishers and authors of these two books, and the owners of the PEIR digital library. """ _HOMEPAGE = "https://github.com/UCSD-AI4H/PathVQA" _LICENSE = "MIT" _URLS = { "train": "data/train.parquet", "val": "data/val.parquet", "test": "data/test.parquet", } class PathVQA(datasets.GeneratorBasedBuilder): """ PathVQA: 30000+ Questions for Medical Visual Question Answering. The data was obtained from the updated Google Drive link shared by the authors on Feb 15, 2023, see https://github.com/UCSD-AI4H/PathVQA/commit/117e7f4ef88a0e65b0e7f37b98a73d6237a3ceab. This version of the dataset contains a total of 5,004 images and 32,795 question-answer pairs. Out of the 5,004 images, 4,289 images are referenced by a question-answer pair, while 715 images are not used. There are a few image-question-answer triplets which occur more than once in the same split (training, validation, test). After dropping the duplicate image-question-answer triplets, the dataset contains 32,632 question-answer pairs on 4,289 images. """ VERSION = datasets.Version("0.1.0") def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "image": datasets.Image(), "question": datasets.Value("string"), "answer": datasets.Value("string") } ), homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, ) def _split_generators(self, dl_manager): return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={ "filepath": os.path.join(dl_manager.download(_URLS["train"])), "split": "train", }, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={ "filepath": os.path.join(dl_manager.download(_URLS["val"])), "split": "val", }, ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={ "filepath": os.path.join(dl_manager.download(_URLS["test"])), "split": "test" }, ), ] def _generate_examples(self, filepath, split): df = pandas.read_parquet(filepath) for key, row in df.iterrows(): yield key, { "image": row["image"], "question": row["question"], "answer": row["answer"] }