"""This script de-duplicates the data provided by the PathVQA authors,
creates an "imagefolder" dataset and pushes it to the hub.
"""
import os
import pickle
import shutil

import datasets
import pandas as pd
for split in ["train", "val", "test"]:
    os.makedirs(f"data/{split}/", exist_ok=True)

    # load the image-question-answer triplets
    with open(f"pvqa/qas/{split}/{split}_qa.pkl", "rb") as f:
        data = pd.DataFrame(pickle.load(f))
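
    # NOTE (assumption): besides the "image" key renamed below, each record is
    # expected to carry the question and answer fields; all columns from the
    # pickle are kept as-is and written to metadata.csv further down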
    # drop the duplicate image-question-answer triplets
    data = data.drop_duplicates(ignore_index=True)

    # convert the image names to file names
    data = data.rename(columns={"image": "file_name"})
    data["file_name"] += ".jpg"

    # copy the images referenced by the question-answer pairs
    for image in data["file_name"].unique():
        shutil.copyfile(src=f"pvqa/images/{split}/{image}", dst=f"data/{split}/{image}")

    # save the metadata
    data.to_csv(f"data/{split}/metadata.csv", index=False)
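
# the metadata.csv files written above are what let the "imagefolder" builder
# attach the question/answer columns to each image: rows are matched to the
# image files through the "file_name" column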
# push the dataset to the hub
dataset = datasets.load_dataset("imagefolder", data_dir="data/")
dataset.push_to_hub("flaviagiammarino/path-vqa")
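
# Sanity check (sketch): re-load the pushed dataset from the hub and inspect
# one example. Assumes the push above succeeded, that you are authenticated
# (e.g. via `huggingface-cli login`), and that the pickled records carried
# "question" and "answer" fields, as the column names suggest.
ds = datasets.load_dataset("flaviagiammarino/path-vqa")
print(ds["train"][0]["question"], ds["train"][0]["answer"])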