"""
This is the script used to create the Hugging Face dataset from the CUB-200-2011 archive
downloaded from https://www.vision.caltech.edu/datasets/cub_200_2011/ and extracted into
the working directory (so that e.g. CUB_200_2011/CUB_200_2011/images.txt exists).
"""

import json
import shutil
from pathlib import Path

import datasets
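
# images.txt lists one "<image_id> <image path relative to the images/ directory>" pair per line.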
index_to_path = (
    Path("CUB_200_2011/CUB_200_2011/images.txt")
    .read_text()
    .strip()
    .split("\n")
)
index_to_path = [Path("CUB_200_2011/CUB_200_2011/images") / Path(x.split(" ")[-1]) for x in index_to_path]
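
# train_test_split.txt lists one "<image_id> <is_training_image>" pair per line,
# where 1 marks a training image and 0 a test image.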
index_to_split = (
    Path("CUB_200_2011/CUB_200_2011/train_test_split.txt")
    .read_text()
    .strip()
    .split("\n")
)
index_to_split = ["train" if x.split(" ")[-1] == "1" else "test" for x in index_to_split]
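
# bounding_boxes.txt lists one "<image_id> <x> <y> <width> <height>" line per image,
# with the box given as its top-left corner plus width and height.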
index_to_bbox = (
    Path("CUB_200_2011/CUB_200_2011/bounding_boxes.txt")
    .read_text()
    .strip()
    .split("\n")
)
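

# CUB bounding boxes are given as [x, y, width, height]; convert_bbox turns them into
# [x_min, y_min, x_max, y_max] corner coordinates for the metadata.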
def convert_bbox(bbox):
    new_bbox = [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]
    return new_bbox


index_to_bbox = [[float(i) for i in x.split(" ")[1:]] for x in index_to_bbox]
index_to_bbox = [convert_bbox(bbox) for bbox in index_to_bbox]

data_dir = Path("data")
train_dir = data_dir / "train"
test_dir = data_dir / "test"
train_dir.mkdir(parents=True, exist_ok=True)
test_dir.mkdir(parents=True, exist_ok=True)

metadata = []
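
# Copy each image into data/<split>/<class_dir>/<file_name> and record its bounding box
# in the metadata.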
for path, split, bbox in zip(index_to_path, index_to_split, index_to_bbox):
    class_dir, file_name = path.parts[-2:]
    split_dir = train_dir / class_dir if split == "train" else test_dir / class_dir
    split_dir.mkdir(parents=True, exist_ok=True)
    destination_path = split_dir / file_name
    # file_name in metadata.jsonl is relative to the data/ directory, i.e. "<split>/<class_dir>/<file_name>".
    metadata_file_name = Path("/".join(destination_path.parts[1:]))
    metadata.append({"file_name": str(metadata_file_name), "bbox": bbox})
    shutil.copy(path, destination_path)
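
# Write the metadata so the datasets "imagefolder" loader can attach the bbox column;
# each line is a JSON object with the image's relative file_name and its bbox.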
with open("data/metadata.jsonl", "w") as f:
    for md in metadata:
        f.write(f"{json.dumps(md)}\n")
dataset = datasets.load_dataset("imagefolder", data_dir="data", drop_labels=False)

dataset.push_to_hub("bentrevett/cub-200-2011")
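
# Once pushed, the dataset can be reloaded with datasets.load_dataset("bentrevett/cub-200-2011").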