import os
import zipfile
from pathlib import Path

import datasets


class Photos(datasets.GeneratorBasedBuilder):
    """Photo quality-rating dataset.

    Images are grouped into seven quality classes ("Not Applicable" through
    "Exceptional"), each distributed as a separate zip archive on the
    Hugging Face Hub. All images are placed in a single TRAIN split.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: an image feature and its quality label."""
        return datasets.DatasetInfo(
            features=datasets.Features({
                "image": datasets.Image(),
                "label": datasets.ClassLabel(names=[
                    "Not Applicable", "Very Poor", "Poor", "Fair",
                    "Good", "Excellent", "Exceptional",
                ]),
            }),
            supervised_keys=("image", "label"),
        )

    def _split_generators(self, dl_manager):
        """Download and extract one zip per label; yield a single TRAIN split."""
        urls = {
            'Not Applicable': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Not Applicable.zip",
            'Very Poor': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Very Poor.zip",
            'Poor': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Poor.zip",
            'Fair': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Fair.zip",
            'Good': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Good.zip",
            'Excellent': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Excellent.zip",
            'Exceptional': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Exceptional.zip",
        }
        # download_and_extract returns {label: path-to-extracted-directory}.
        # BUG FIX: the original reduced each path to Path(file).stem (a bare
        # directory name) and later joined it onto self.config.data_dir, which
        # is None by default — crashing generation. Keep the full paths instead.
        extracted_dirs = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"extracted_dirs": extracted_dirs},
            ),
        ]

    def _generate_examples(self, extracted_dirs):
        """Yield (key, example) pairs from the extracted per-label directories.

        Args:
            extracted_dirs: mapping of label name -> extracted directory path.
        """
        for label, extracted_dir in extracted_dirs.items():
            # rglob: the archive may contain a top-level folder, so search
            # recursively rather than only the extraction root. Sorting keeps
            # example order deterministic across runs.
            for img_path in sorted(Path(extracted_dir).rglob('*.jpg')):
                yield str(img_path), {
                    "image": str(img_path),
                    "label": label,
                }