Rename photo_dataset.py to photos.py
Browse files
- photo_dataset.py: +0 −61
- photos.py: +48 −0
photo_dataset.py
DELETED
@@ -1,61 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import random
|
3 |
-
import zipfile
|
4 |
-
from pathlib import Path
|
5 |
-
from datasets import load_dataset, Features, ClassLabel, Image
|
6 |
-
|
7 |
-
# Define the URLs for the zip files
|
8 |
-
_URLS = {
|
9 |
-
'Not Applicable': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Not Applicable.zip",
|
10 |
-
'Very Poor': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Very Poor.zip",
|
11 |
-
'Poor': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Poor.zip",
|
12 |
-
'Fair': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Fair.zip",
|
13 |
-
'Good': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Good.zip",
|
14 |
-
'Excellent': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Excellent.zip",
|
15 |
-
'Exceptional': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Exceptional.zip"
|
16 |
-
}
|
17 |
-
|
18 |
-
# Dataset schema: every example pairs a decoded image with its quality
# rating; the ClassLabel vocabulary follows the key order of _URLS.
features = Features({
    'image': Image(),
    'label': ClassLabel(names=[*_URLS]),
})
|
23 |
-
|
24 |
-
def extract_and_load_images(data_dir):
    """Unzip every label archive found in *data_dir* and collect its images.

    For each entry in ``_URLS`` the matching zip is expected to already sit
    in *data_dir*; it is extracted there, and every ``.jpg`` in the folder
    named after the archive is recorded.

    Returns a list of ``{'image': path, 'label': label}`` dicts.
    """
    records = []
    for label, url in _URLS.items():
        archive = os.path.join(data_dir, os.path.basename(url))
        with zipfile.ZipFile(archive, 'r') as handle:
            handle.extractall(data_dir)
        # Archives are assumed to extract into a directory that shares the
        # archive's name without the .zip suffix.
        extracted = Path(data_dir) / Path(archive).stem
        records.extend(
            {'image': str(jpg), 'label': label}
            for jpg in extracted.glob('*.jpg')
        )
    return records
|
35 |
-
|
36 |
-
def split_dataset(images, split_ratios=(0.7, 0.2, 0.1)):
    """Shuffle *images* and partition them into train/validation/test.

    Args:
        images: list of example dicts to partition.
        split_ratios: fractions for (train, validation, test). The test
            split receives whatever remains after the first two cuts, so
            the ratios need not sum to exactly 1.

    Returns:
        dict with 'train', 'validation' and 'test' lists.

    Note:
        Shuffling uses the global ``random`` state — seed it for
        reproducible splits. The input list is left unmodified.
    """
    # Shuffle a copy: the original implementation called
    # random.shuffle(images) and silently reordered the caller's list.
    shuffled = list(images)
    random.shuffle(shuffled)
    total = len(shuffled)
    train_end = int(total * split_ratios[0])
    val_end = train_end + int(total * split_ratios[1])
    return {
        'train': shuffled[:train_end],
        'validation': shuffled[train_end:val_end],
        'test': shuffled[val_end:],
    }
|
47 |
-
|
48 |
-
def photos_dataset(data_dir):
    """Extract all label archives in *data_dir* and return shuffled
    train/validation/test splits of the resulting image records."""
    return split_dataset(extract_and_load_images(data_dir))
|
52 |
-
|
53 |
-
if __name__ == '__main__':
    data_dir = './photos_data'
    os.makedirs(data_dir, exist_ok=True)
    dataset_splits = photos_dataset(data_dir)

    # Build a Hugging Face Dataset per split. The original code called
    # load_dataset('image', data_files=...), but 'image' is not a packaged
    # builder and a list of example dicts is not a valid data_files spec.
    # The in-memory records are converted directly and cast to the declared
    # schema so 'image' decodes lazily and 'label' becomes a ClassLabel.
    from datasets import Dataset

    for split, data in dataset_splits.items():
        ds = Dataset.from_list(data).cast(features)
        print(f"{split} dataset: {ds}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
photos.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import zipfile
|
3 |
+
from pathlib import Path
|
4 |
+
import datasets
|
5 |
+
|
6 |
+
class Photos(datasets.GeneratorBasedBuilder):
    """Image-classification dataset of photos bucketed by quality rating.

    Each label ships as its own zip archive on the Hugging Face Hub; this
    builder downloads and extracts all seven archives and yields
    (image, label) examples in a single TRAIN split.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the example schema: a decoded image plus its rating."""
        return datasets.DatasetInfo(
            features=datasets.Features({
                "image": datasets.Image(),
                "label": datasets.ClassLabel(names=["Not Applicable", "Very Poor", "Poor", "Fair", "Good", "Excellent", "Exceptional"]),
            }),
            supervised_keys=("image", "label"),
        )

    def _split_generators(self, dl_manager):
        # One archive per quality label.
        urls = {
            'Not Applicable': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Not Applicable.zip",
            'Very Poor': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Very Poor.zip",
            'Poor': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Poor.zip",
            'Fair': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Fair.zip",
            'Good': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Good.zip",
            'Excellent': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Excellent.zip",
            'Exceptional': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Exceptional.zip"
        }

        # download_and_extract returns, for each URL, the path of the
        # directory the archive was extracted into (not the zip file), so
        # these paths can be globbed directly. The previous code took
        # Path(file).stem here — mangling the cache path — and later
        # re-rooted it under self.config.data_dir, which is None unless the
        # user explicitly passes data_dir, crashing os.path.join.
        extracted_dirs = dl_manager.download_and_extract(urls)

        # All images go to TRAIN for now; add validation/test generators
        # here if the dataset is ever pre-split upstream.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"extracted_dirs": extracted_dirs},
            ),
        ]

    def _generate_examples(self, extracted_dirs):
        """Yield (key, example) pairs for every .jpg under each label's
        extraction directory.

        The unique file path doubles as the example key.
        """
        for label, extracted_dir in extracted_dirs.items():
            # rglob: the archive may extract into a nested folder named
            # after the label, so search recursively. sorted() keeps the
            # example order deterministic across filesystems.
            for img_path in sorted(Path(extracted_dir).rglob("*.jpg")):
                yield str(img_path), {
                    "image": str(img_path),
                    "label": label,
                }
|