# renovation/renovation.py — Hugging Face dataset loading script
# (Hub repo: rshrott/renovation; web-view residue replaced with this comment header.)
import os
import glob
import random
import datasets
from datasets.tasks import ImageClassification
from datasets import load_dataset
import os
from huggingface_hub import login
_HOMEPAGE = "https://github.com/your-github/renovation"

_CITATION = """\
@ONLINE {renovationdata,
author="Your Name",
title="Renovation dataset",
month="January",
year="2023",
url="https://github.com/your-github/renovation"
}
"""

# FIX: the class list below previously read "... Good, Excellent, and
# Exceptional", which did not match the actual labels declared in _NAMES
# ("Great" exists, "Exceptional" does not). Kept in sync with _NAMES now.
_DESCRIPTION = """\
Renovations is a dataset of images of houses taken in the field using smartphone
cameras. It consists of 7 classes: Not Applicable, Very Poor, Poor, Fair, Good, Great, and Excellent renovations.
Data was collected by the your research lab.
"""

# Per-class zip archives; each extracts to a directory of .jpeg images.
# NOTE(review): "Very Poor" points at the rshrott/photos repo while every
# other class lives in rshrott/renovation — confirm this is intentional.
_URLS = {
    "Not Applicable": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Not Applicable.zip",
    "Very Poor": "https://huggingface.co/datasets/rshrott/photos/resolve/main/Very Poor.zip",
    "Poor": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Poor.zip",
    "Fair": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Fair.zip",
    "Good": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Good.zip",
    "Great": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Great.zip",
    "Excellent": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Excellent.zip"
}

# ClassLabel names, in label-index order; must match the keys of _URLS.
_NAMES = ["Not Applicable", "Very Poor", "Poor", "Fair", "Good", "Great", "Excellent"]
class Renovations(datasets.GeneratorBasedBuilder):
    """Renovations house images dataset.

    Smartphone photos of houses, labelled with one of the renovation-quality
    classes in ``_NAMES``. The per-class zip archives in ``_URLS`` are
    downloaded once, pooled, shuffled with a fixed seed, and split
    80/10/10 into train/test/val so the partition is reproducible.
    """

    def _info(self):
        """Return dataset metadata (features, supervised keys, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="labels")],
        )

    def _split_generators(self, dl_manager):
        """Download/extract every class archive and fan out to three splits.

        All splits receive the same ``data_files`` mapping; the actual
        train/val/test partitioning is done deterministically inside
        ``_generate_examples`` via the ``split`` kwarg.
        """
        data_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_files": data_files,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_files": data_files,
                    "split": "val",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_files": data_files,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, data_files, split):
        """Yield ``(key, example)`` pairs for the requested split.

        Args:
            data_files: mapping of class label -> extracted archive directory.
            split: one of ``"train"``, ``"val"``, ``"test"``.
        """
        all_files_and_labels = []
        for label, path in data_files.items():
            # BUG FIX: the original pattern ``path + '/*.jpeg'`` contained no
            # ``**`` component, so ``recursive=True`` was a no-op and any
            # images inside sub-directories of the extracted archive were
            # silently skipped. ``**`` also matches zero directories, so
            # top-level files are still found (backward-compatible superset).
            files = glob.glob(os.path.join(path, "**", "*.jpeg"), recursive=True)
            all_files_and_labels.extend((file, label) for file in files)
        # Fixed seed: every call (one per split) partitions the pool identically.
        random.seed(43)
        random.shuffle(all_files_and_labels)
        num_files = len(all_files_and_labels)
        # 80% train / 10% test / 10% val.
        train_data = all_files_and_labels[:int(num_files * 0.8)]
        test_data = all_files_and_labels[int(num_files * 0.8):int(num_files * 0.9)]
        val_data = all_files_and_labels[int(num_files * 0.9):]
        if split == "train":
            data_to_use = train_data
        elif split == "test":
            data_to_use = test_data
        else:  # "val" split
            data_to_use = val_data
        for idx, (file, label) in enumerate(data_to_use):
            yield idx, {
                "image_file_path": file,
                # datasets.Image() decodes the image lazily from this path.
                "image": file,
                "labels": label,
            }