flaviagiammarino committed on
Commit
647b9ea
•
1 Parent(s): c9602cf

Upload process_dataset.py

Files changed (1)
  scripts/process_dataset.py +20 -55
scripts/process_dataset.py CHANGED
@@ -1,70 +1,35 @@
-"""This script processes the dataset provided by the PathVQA authors. This script drops the duplicate
-image-question-answer triplets, converts the images to bytes, and saves the dataset to a parquet file.
+"""This script de-duplicates the data provided by the PathVQA authors,
+creates an "imagefolder" dataset and pushes it to the hub.
 """
 
-import io
+import os
+import shutil
 import pickle
+import datasets
 import pandas as pd
-from PIL import Image
-from tqdm import tqdm
 
-# loop across the splits
 for split in ["train", "val", "test"]:
 
+    os.makedirs(f"data/{split}/", exist_ok=True)
+
     # load the image-question-answer triplets
     data = pd.DataFrame(pickle.load(open(f"pvqa/qas/{split}/{split}_qa.pkl", "rb")))
-    print(f"Total number of triplets in {split} set: {format(data.shape[0], ',.0f')}")
 
     # drop the duplicate image-question-answer triplets
     data = data.drop_duplicates(ignore_index=True)
-    print(f"Unique number of triplets in {split} set: {format(data.shape[0], ',.0f')}")
 
-    # load the images as bytes
-    print(f"Loading {split} set images...")
-    images = {}
-    for image in tqdm(data["image"].unique()):
-        img = Image.open(f"pvqa/images/{split}/{image}.jpg")
-        byt = io.BytesIO()
-        img.save(byt, format="jpeg")
-        byt = byt.getvalue()
-        images[image] = {"path": None, "bytes": byt}
-    print(f"Unique number of images in {split} set: {format(len(images), ',.0f')}")
+    # convert the image names to file names
+    data = data.rename(columns={"image": "file_name"})
+    data["file_name"] += ".jpg"
 
-    # save the data to a parquet file
-    print(f"Writing data to data/{split}.parquet...")
-    dataset = []
-    for _, row in data.iterrows():
-        dataset.append({
-            "image": images[row["image"]],
-            "question": row["question"],
-            "answer": row["answer"]
-        })
-    pd.DataFrame(dataset).to_parquet(f"data/{split}.parquet")
-    print("Done")
-    print("---------------------------------")
+    # copy the images referenced by the question-answer pairs
+    for image in data["file_name"].unique():
+        shutil.copyfile(src=f"pvqa/images/{split}/{image}", dst=f"data/{split}/{image}")
 
-'''
-Total number of triplets in train set: 19,755
-Unique number of triplets in train set: 19,654
-Loading train set images...
-100%|██████████| 2599/2599 [00:46<00:00, 56.27it/s]
-Unique number of images in train set: 2,599
-Writing data to data/train.parquet...
-Done
----------------------------------
-Total number of triplets in val set: 6,279
-Unique number of triplets in val set: 6,259
-Loading val set images...
-100%|██████████| 832/832 [00:13<00:00, 59.49it/s]
-Unique number of images in val set: 832
-Writing data to data/val.parquet...
-Done
----------------------------------
-Total number of triplets in test set: 6,761
-Unique number of triplets in test set: 6,719
-Loading test set images...
-100%|██████████| 858/858 [00:15<00:00, 53.93it/s]
-Unique number of images in test set: 858
-Writing data to data/test.parquet...
-Done
-'''
+    # save the metadata
+    data.to_csv(f"data/{split}/metadata.csv", index=False)
+
+# push the dataset to the hub
+dataset = datasets.load_dataset("imagefolder", data_dir="data/")
+dataset.push_to_hub("flaviagiammarino/path-vqa")
+
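For reference, the script above lays out the "imagefolder" structure below before pushing (directory sketch inferred from the code), and the pushed dataset can be loaded back as in the minimal sketch that follows. The split key "train" is an assumption: the imagefolder loader may normalize a directory name such as "val" to "validation".

data/
├── train/
│   ├── metadata.csv    # columns: file_name, question, answer
│   └── *.jpg           # images referenced by the file_name column
├── val/
└── test/

import datasets

# load the processed dataset back from the hub
# (repo id taken from the push_to_hub call in the script)
dataset = datasets.load_dataset("flaviagiammarino/path-vqa")

# each example carries the decoded PIL image plus one column
# per metadata.csv field (here: question and answer)
example = dataset["train"][0]
print(example["question"], "->", example["answer"])
print(example["image"].size)  # a PIL.Image.Image decoded by the loader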