import csv
from io import BytesIO

import datasets
import requests
from datasets.tasks import ImageClassification
from PIL import Image

_HOMEPAGE = "https://huggingface.co/datasets/rshrott/renovation"

_CITATION = """\
@ONLINE {renovationquality,
author="Your Name",
title="Renovation Quality Dataset",
month="Your Month",
year="Your Year",
url="https://huggingface.co/datasets/rshrott/renovation"
}
"""

_DESCRIPTION = """\
This dataset contains images of various properties, along with labels indicating the quality of renovation - 'cheap', 'average', 'expensive'.
"""

_URL = "https://huggingface.co/datasets/rshrott/renovation/raw/main/labels.csv"

_NAMES = ["cheap", "average", "expensive"]
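
# Expected layout of the labels CSV at `_URL` (illustrative rows only, inferred from
# how `_generate_examples` reads the file): the first row is a header and is skipped,
# column 0 is an image URL, and column 1 is a label drawn from `_NAMES`, e.g.:
#
#   image_url,label
#   https://.../photo_1.jpg,cheap
#   https://.../photo_2.jpg,expensive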


class RenovationQualityDataset(datasets.GeneratorBasedBuilder):
    """Renovation Quality Dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        # Download the labels CSV once; every split reads the same file and
        # slices out its own portion of the rows in _generate_examples.
        csv_path = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": csv_path,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": csv_path,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": csv_path,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        def url_to_image(url):
            # Download the image bytes and decode them with Pillow.
            response = requests.get(url, timeout=30)
            response.raise_for_status()
            return Image.open(BytesIO(response.content))

        with open(filepath, "r", encoding="utf-8") as f:
            reader = csv.reader(f)
            next(reader)  # skip header
            rows = list(reader)

        # Deterministic 80/10/10 split over the rows of the labels CSV.
        if split == "train":
            rows = rows[:int(0.8 * len(rows))]
        elif split == "validation":
            rows = rows[int(0.8 * len(rows)):int(0.9 * len(rows))]
        else:  # test
            rows = rows[int(0.9 * len(rows)):]

        for id_, row in enumerate(rows):
            if len(row) < 2:
                print(f"Row with id {id_} has less than 2 elements: {row}")
                continue
            image_file_path = str(row[0])
            image = url_to_image(image_file_path)
            yield id_, {
                "image_file_path": image_file_path,
                "image": image,
                "label": row[1],
            }
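

# Minimal usage sketch (assumption: this file is the repository's dataset loading
# script). Running the module directly pulls one example through the builder via
# `datasets.load_dataset`; recent releases of `datasets` may additionally require
# `trust_remote_code=True` for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    # Streaming avoids materialising every image up front; "train" matches the
    # SplitGenerator names defined above.
    ds = load_dataset("rshrott/renovation", split="train", streaming=True)
    example = next(iter(ds))
    print(example["image_file_path"], example["label"])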