Datasets:
Tasks:
Visual Question Answering
Formats:
parquet
Languages:
English
Size:
10K - 100K
ArXiv:
Tags:
medical
License:
flaviagiammarino
committed on
Commit
β’
711f25f
1
Parent(s):
4895199
Upload process_dataset.py
Browse files- scripts/process_dataset.py +70 -0
scripts/process_dataset.py
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""This script processes the dataset provided by the PathVQA authors. This script drops the duplicate
image-question-answer triplets, converts the images to bytes, and saves the dataset to a parquet file.
"""

import io
import pickle

import pandas as pd
from PIL import Image
from tqdm import tqdm

# loop across the splits
for split in ["train", "val", "test"]:

    # load the image-question-answer triplets
    # use a context manager so the pickle file handle is closed promptly
    # (the original passed an unclosed open() directly to pickle.load)
    with open(f"pvqa/qas/{split}/{split}_qa.pkl", "rb") as f:
        data = pd.DataFrame(pickle.load(f))
    print(f"Total number of triplets in {split} set: {format(data.shape[0], ',.0f')}")

    # drop the duplicate image-question-answer triplets
    data = data.drop_duplicates(ignore_index=True)
    print(f"Unique number of triplets in {split} set: {format(data.shape[0], ',.0f')}")

    # load the images as bytes; each unique image is read and re-encoded to
    # JPEG exactly once, keyed by its identifier
    print(f"Loading {split} set images...")
    images = {}
    for image in tqdm(data["image"].unique()):
        # context manager closes the image file handle as soon as the bytes
        # are captured (the original leaked one handle per image)
        with Image.open(f"pvqa/images/{split}/{image}.jpg") as img:
            byt = io.BytesIO()
            img.save(byt, format="jpeg")
        # {"path": None, "bytes": ...} is the encoded-image layout expected
        # by downstream parquet consumers — TODO confirm against the loader
        images[image] = {"path": None, "bytes": byt.getvalue()}
    print(f"Unique number of images in {split} set: {format(len(images), ',.0f')}")

    # save the data to a parquet file: one record per unique triplet, with
    # the shared image bytes looked up from the cache built above
    print(f"Writing data to data/{split}.parquet...")
    dataset = [
        {
            "image": images[row["image"]],
            "question": row["question"],
            "answer": row["answer"],
        }
        for _, row in data.iterrows()
    ]
    pd.DataFrame(dataset).to_parquet(f"data/{split}.parquet")
    print("Done")
    print("---------------------------------")

'''
Total number of triplets in train set: 19,755
Unique number of triplets in train set: 19,654
Loading train set images...
100%|ββββββββββ| 2599/2599 [00:46<00:00, 56.27it/s]
Unique number of images in train set: 2,599
Writing data to data/train.parquet...
Done
---------------------------------
Total number of triplets in val set: 6,279
Unique number of triplets in val set: 6,259
Loading val set images...
100%|ββββββββββ| 832/832 [00:13<00:00, 59.49it/s]
Unique number of images in val set: 832
Writing data to data/val.parquet...
Done
---------------------------------
Total number of triplets in test set: 6,761
Unique number of triplets in test set: 6,719
Loading test set images...
100%|ββββββββββ| 858/858 [00:15<00:00, 53.93it/s]
Unique number of images in test set: 858
Writing data to data/test.parquet...
Done
'''