rshrott commited on
Commit
12e81a4
1 Parent(s): cdd659f

Update photo_dataset.py

Browse files
Files changed (1) hide show
  1. photo_dataset.py +61 -0
photo_dataset.py CHANGED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import random
3
+ import zipfile
4
+ from pathlib import Path
5
+ from datasets import load_dataset, Features, ClassLabel, Image
6
+
7
+ # Define the URLs for the zip files
8
+ _URLS = {
9
+ 'Not Applicable': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Not Applicable.zip",
10
+ 'Very Poor': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Very Poor.zip",
11
+ 'Poor': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Poor.zip",
12
+ 'Fair': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Fair.zip",
13
+ 'Good': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Good.zip",
14
+ 'Excellent': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Excellent.zip",
15
+ 'Exceptional': "https://huggingface.co/datasets/rshrott/photos/resolve/main/Exceptional.zip"
16
+ }
17
+
18
# Schema for the dataset: a decoded image plus its quality-rating label.
# Label names are taken straight from _URLS so the two stay in sync.
features = Features(
    {
        'image': Image(),
        'label': ClassLabel(names=list(_URLS)),
    }
)
23
+
24
def extract_and_load_images(data_dir):
    """Unpack every label's zip archive found in *data_dir* and list its images.

    Assumes each archive named in ``_URLS`` has already been downloaded into
    *data_dir* (filename taken from the URL's final path component) and that
    "<name>.zip" expands into a "<name>/" directory of ``.jpg`` files.

    Returns:
        list of ``{'image': <path str>, 'label': <label str>}`` records.
    """
    records = []
    root = Path(data_dir)
    for label, url in _URLS.items():
        archive = root / os.path.basename(url)
        with zipfile.ZipFile(archive, 'r') as zf:
            zf.extractall(data_dir)
        # The archive stem ("<name>.zip" -> "<name>") names the unpacked dir.
        for jpg in (root / archive.stem).glob('*.jpg'):
            records.append({'image': str(jpg), 'label': label})
    return records
35
+
36
def split_dataset(images, split_ratios=(0.7, 0.2, 0.1)):
    """Randomly partition *images* into train/validation/test splits.

    Args:
        images: list of example records.
        split_ratios: (train, validation, test) fractions. The first two
            splits are truncated to whole examples; the test split takes
            whatever remains, so the three splits always cover every item.

    Returns:
        dict with 'train', 'validation' and 'test' lists.

    Note: draws from the global ``random`` state — seed ``random`` for
    reproducible splits.
    """
    # Shuffle a copy so the caller's list is not reordered in place
    # (the original implementation mutated the argument).
    shuffled = list(images)
    random.shuffle(shuffled)
    total = len(shuffled)
    train_end = int(total * split_ratios[0])
    val_end = train_end + int(total * split_ratios[1])
    return {
        'train': shuffled[:train_end],
        'validation': shuffled[train_end:val_end],
        'test': shuffled[val_end:],
    }
47
+
48
def photos_dataset(data_dir):
    """Extract the archives under *data_dir* and return train/validation/test example lists."""
    return split_dataset(extract_and_load_images(data_dir))
52
+
53
if __name__ == '__main__':
    # Working directory where the label zip archives are expected to live.
    data_dir = './photos_data'
    os.makedirs(data_dir, exist_ok=True)
    # NOTE(review): extract_and_load_images only *extracts* archives already
    # present in data_dir; nothing in this file downloads them from _URLS —
    # confirm a prior step fetches the zips.
    dataset_splits = photos_dataset(data_dir)

    # Create Hugging Face datasets
    for split, data in dataset_splits.items():
        # NOTE(review): 'image' is not a standard load_dataset builder name,
        # and data_files normally expects file paths rather than lists of
        # dicts — verify this call; datasets.Dataset.from_list may be the
        # intended API for in-memory records.
        ds = load_dataset('image', data_files={split: data}, split=split, features=features)
        print(f"{split} dataset: {ds}")