"""This script de-duplicates the data provided by the PathVQA authors,
creates an "imagefolder" dataset and pushes it to the hub.
"""

import os
import shutil
import pickle
import datasets
import pandas as pd

for split in ["train", "val", "test"]:
    
    os.makedirs(f"data/{split}/", exist_ok=True)
    
    # load the image-question-answer triplets
    with open(f"pvqa/qas/{split}/{split}_qa.pkl", "rb") as f:
        data = pd.DataFrame(pickle.load(f))
    
    # drop the duplicate image-question-answer triplets
    data = data.drop_duplicates(ignore_index=True)
    
    # convert the image names to file names
    data = data.rename(columns={"image": "file_name"})
    data["file_name"] += ".jpg"
    
    # copy the images referenced by the question-answer pairs
    for image in data["file_name"].unique():
        shutil.copyfile(src=f"pvqa/images/{split}/{image}", dst=f"data/{split}/{image}")
    
    # save the metadata in the metadata.csv format expected by the imagefolder builder
    data.to_csv(f"data/{split}/metadata.csv", index=False)

# push the dataset to the hub
dataset = datasets.load_dataset("imagefolder", data_dir="data/")
dataset.push_to_hub("flaviagiammarino/path-vqa")
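
# Optional sanity check (a minimal sketch, not part of the upload logic above):
# reload the pushed dataset from the hub. The imagefolder builder exposes an
# "image" column plus the remaining columns from metadata.csv (the
# question-answer fields kept above).
reloaded = datasets.load_dataset("flaviagiammarino/path-vqa")
print(reloaded)
print(reloaded["train"][0])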