flaviagiammarino committed
Commit c3944be
1 Parent(s): 83b3c8e

Upload path_vqa.py

Files changed (1)
  1. path_vqa.py +135 -0
path_vqa.py ADDED
@@ -0,0 +1,135 @@
+"""PathVQA: 30000+ Questions for Medical Visual Question Answering"""
+
+import pandas
+import os
+
+import datasets
+
+_CITATION = """\
+@article{he2020pathvqa,
+  title={PathVQA: 30000+ Questions for Medical Visual Question Answering},
+  author={He, Xuehai and Zhang, Yichen and Mou, Luntian and Xing, Eric and Xie, Pengtao},
+  journal={arXiv preprint arXiv:2003.10286},
+  year={2020}
+}
+"""
+
+_DESCRIPTION = """\
+PathVQA is a dataset of question-answer pairs on pathology images. The questions are similar to those in the
+American Board of Pathology (ABP) test. The dataset includes both open-ended questions and binary "yes/no"
+questions. The dataset is built from two publicly available pathology textbooks, "Textbook of Pathology" and
+"Basic Pathology", and a publicly available digital library, the "Pathology Education Informational Resource"
+(PEIR). The copyrights of the images and captions belong to the publishers and authors of these two books
+and to the owners of the PEIR digital library.
+"""
+
+_HOMEPAGE = "https://github.com/UCSD-AI4H/PathVQA"
+
+_LICENSE = "MIT"
+
+_URLS = {
+    "image_train": "data/image/train_img.tar",
+    "image_val": "data/image/val_img.tar",
+    "image_test": "data/image/test_img.tar",
+    "text_train": "data/text/train_qa.jsonl",
+    "text_val": "data/text/val_qa.jsonl",
+    "text_test": "data/text/test_qa.jsonl",
+}
+
+
+class PathVQA(datasets.GeneratorBasedBuilder):
+    """
+    PathVQA: 30000+ Questions for Medical Visual Question Answering.
+
+    The data was obtained from the updated Google Drive link shared by the authors in their GitHub repository
+    on Feb 15, 2023; see https://github.com/UCSD-AI4H/PathVQA/commit/117e7f4ef88a0e65b0e7f37b98a73d6237a3ceab.
+
+    This version of the dataset contains a total of 5,004 images and 32,795 question-answer pairs. Of the
+    5,004 images, 4,289 are referenced by at least one question-answer pair, while 715 are not used.
+    Furthermore, there are several duplicates, i.e. image-question-answer triplets that occur more than once
+    in the same split (train, val, test). After dropping the duplicate triplets, the dataset contains
+    32,632 question-answer pairs on 4,289 images.
+    """
+
+    VERSION = datasets.Version("0.1.0")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="full", version=VERSION, description="Original dataset."),
+        datasets.BuilderConfig(name="de-duped", version=VERSION, description="De-duplicated dataset."),
+    ]
+
+    DEFAULT_CONFIG_NAME = "de-duped"
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "image": datasets.Image(),
+                "question": datasets.Value("string"),
+                "answer": datasets.Value("string"),
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        # images: download and extract the tar archives
+        image_train_dir = dl_manager.download_and_extract(_URLS["image_train"])
+        image_val_dir = dl_manager.download_and_extract(_URLS["image_val"])
+        image_test_dir = dl_manager.download_and_extract(_URLS["image_test"])
+
+        # question-answer pairs: download the JSON Lines files
+        text_train_dir = dl_manager.download(_URLS["text_train"])
+        text_val_dir = dl_manager.download(_URLS["text_val"])
+        text_test_dir = dl_manager.download(_URLS["text_test"])
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "image_filepath": image_train_dir,
+                    "text_filepath": text_train_dir,
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "image_filepath": image_val_dir,
+                    "text_filepath": text_val_dir,
+                    "split": "val",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "image_filepath": image_test_dir,
+                    "text_filepath": text_test_dir,
+                    "split": "test",
+                },
+            ),
+        ]
+
+    def _generate_examples(self, image_filepath, text_filepath, split):
+        # load the question-answer pairs for the requested split
+        df = pandas.read_json(text_filepath, orient="records", lines=True)
+        if self.config.name == "de-duped":
+            # drop duplicate image-question-answer triplets within the split
+            df = df.drop_duplicates(ignore_index=True)
+
+        for key, row in df.iterrows():
+            yield key, {
+                "image": os.path.join(image_filepath, row["image"]),
+                "question": row["question"],
+                "answer": row["answer"],
+            }
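For context, a minimal usage sketch of the builder above (not part of the commit). It assumes the script sits next to the data/ directory referenced in _URLS, for example in a local clone of the dataset repository, and that the installed version of the datasets library still supports loading dataset scripts; the config names "de-duped" and "full" come from BUILDER_CONFIGS.

# Minimal usage sketch; paths and environment are assumptions, not part of the commit.
import datasets

# "de-duped" is the default config; pass name="full" to keep the duplicate triplets.
ds = datasets.load_dataset("path_vqa.py", name="de-duped")

print(ds)  # DatasetDict with "train", "validation" and "test" splits
example = ds["train"][0]
print(example["question"], "->", example["answer"])
# example["image"] is decoded to a PIL image by the datasets.Image() feature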