Update renovation.py
Browse files- renovation.py +21 -25
renovation.py
CHANGED
@@ -1,11 +1,12 @@
|
|
1 |
import os
|
2 |
import glob
|
3 |
import random
|
4 |
-
import zipfile
|
5 |
|
6 |
import datasets
|
7 |
from datasets.tasks import ImageClassification
|
8 |
-
|
|
|
|
|
9 |
_HOMEPAGE = "https://github.com/your-github/renovation"
|
10 |
|
11 |
_CITATION = """\
|
@@ -25,17 +26,16 @@ Data was collected by the your research lab.
|
|
25 |
"""
|
26 |
|
27 |
_URLS = {
|
28 |
-
"Not Applicable": "https://huggingface.co/datasets/rshrott/
|
29 |
"Very Poor": "https://huggingface.co/datasets/rshrott/photos/resolve/main/Very Poor.zip",
|
30 |
-
"Poor": "https://huggingface.co/datasets/rshrott/
|
31 |
-
"Fair": "https://huggingface.co/datasets/rshrott/
|
32 |
-
"Good": "https://huggingface.co/datasets/rshrott/
|
33 |
-
"Excellent": "https://huggingface.co/datasets/rshrott/
|
34 |
-
"Exceptional": "https://huggingface.co/datasets/rshrott/
|
35 |
}
|
36 |
|
37 |
_NAMES = ["Not Applicable", "Very Poor", "Poor", "Fair", "Good", "Excellent", "Exceptional"]
|
38 |
-
|
39 |
class Renovations(datasets.GeneratorBasedBuilder):
|
40 |
"""Renovations house images dataset."""
|
41 |
|
@@ -56,52 +56,49 @@ class Renovations(datasets.GeneratorBasedBuilder):
|
|
56 |
)
|
57 |
|
58 |
def _split_generators(self, dl_manager):
|
59 |
-
|
60 |
return [
|
61 |
datasets.SplitGenerator(
|
62 |
name=datasets.Split.TRAIN,
|
63 |
gen_kwargs={
|
64 |
-
"
|
65 |
"split": "train",
|
66 |
},
|
67 |
),
|
68 |
datasets.SplitGenerator(
|
69 |
name=datasets.Split.VALIDATION,
|
70 |
gen_kwargs={
|
71 |
-
"
|
72 |
"split": "val",
|
73 |
},
|
74 |
),
|
75 |
datasets.SplitGenerator(
|
76 |
name=datasets.Split.TEST,
|
77 |
gen_kwargs={
|
78 |
-
"
|
79 |
"split": "test",
|
80 |
},
|
81 |
),
|
82 |
]
|
83 |
|
84 |
-
|
85 |
-
def _generate_examples(self, data_dir, split):
|
86 |
all_files_and_labels = []
|
87 |
-
for label in
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
all_files_and_labels.append((file, label))
|
92 |
-
|
93 |
random.seed(43) # ensure reproducibility
|
94 |
random.shuffle(all_files_and_labels)
|
95 |
-
|
96 |
num_files = len(all_files_and_labels)
|
97 |
train_data = all_files_and_labels[:int(num_files*0.9)]
|
98 |
val_test_data = all_files_and_labels[int(num_files*0.9):] # This will be used for both val and test
|
99 |
-
|
100 |
if split == "train":
|
101 |
data_to_use = train_data
|
102 |
else: # "val" or "test" split
|
103 |
data_to_use = val_test_data
|
104 |
-
|
105 |
for idx, (file, label) in enumerate(data_to_use):
|
106 |
yield idx, {
|
107 |
"image_file_path": file,
|
@@ -110,4 +107,3 @@ class Renovations(datasets.GeneratorBasedBuilder):
|
|
110 |
}
|
111 |
|
112 |
|
113 |
-
|
|
|
1 |
import os
|
2 |
import glob
|
3 |
import random
|
|
|
4 |
|
5 |
import datasets
|
6 |
from datasets.tasks import ImageClassification
|
7 |
+
from datasets import load_dataset
|
8 |
+
import os
|
9 |
+
from huggingface_hub import login
|
10 |
_HOMEPAGE = "https://github.com/your-github/renovation"
|
11 |
|
12 |
_CITATION = """\
|
|
|
26 |
"""
|
27 |
|
28 |
# Download URLs: one zip archive per renovation-quality label.
_URLS = {
    "Not Applicable": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Not Applicable.zip",
    # NOTE(review): this one resolves from the "rshrott/photos" repo while every
    # other label resolves from "rshrott/renovation" — confirm this is intentional.
    "Very Poor": "https://huggingface.co/datasets/rshrott/photos/resolve/main/Very Poor.zip",
    "Poor": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Poor.zip",
    "Fair": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Fair.zip",
    "Good": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Good.zip",
    "Excellent": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Excellent.zip",
    "Exceptional": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Exceptional.zip"
}

# Class label names — one per key in _URLS.
_NAMES = ["Not Applicable", "Very Poor", "Poor", "Fair", "Good", "Excellent", "Exceptional"]
|
|
|
39 |
class Renovations(datasets.GeneratorBasedBuilder):
|
40 |
"""Renovations house images dataset."""
|
41 |
|
|
|
56 |
)
|
57 |
|
58 |
def _split_generators(self, dl_manager):
|
59 |
+
data_files = dl_manager.download_and_extract(_URLS)
|
60 |
return [
|
61 |
datasets.SplitGenerator(
|
62 |
name=datasets.Split.TRAIN,
|
63 |
gen_kwargs={
|
64 |
+
"data_files": data_files,
|
65 |
"split": "train",
|
66 |
},
|
67 |
),
|
68 |
datasets.SplitGenerator(
|
69 |
name=datasets.Split.VALIDATION,
|
70 |
gen_kwargs={
|
71 |
+
"data_files": data_files,
|
72 |
"split": "val",
|
73 |
},
|
74 |
),
|
75 |
datasets.SplitGenerator(
|
76 |
name=datasets.Split.TEST,
|
77 |
gen_kwargs={
|
78 |
+
"data_files": data_files,
|
79 |
"split": "test",
|
80 |
},
|
81 |
),
|
82 |
]
|
83 |
|
84 |
+
def _generate_examples(self, data_files, split):
|
|
|
85 |
all_files_and_labels = []
|
86 |
+
for label, path in data_files.items():
|
87 |
+
files = glob.glob(path + '/*.jpeg', recursive=True)
|
88 |
+
all_files_and_labels.extend((file, label) for file in files)
|
89 |
+
|
|
|
|
|
90 |
random.seed(43) # ensure reproducibility
|
91 |
random.shuffle(all_files_and_labels)
|
92 |
+
|
93 |
num_files = len(all_files_and_labels)
|
94 |
train_data = all_files_and_labels[:int(num_files*0.9)]
|
95 |
val_test_data = all_files_and_labels[int(num_files*0.9):] # This will be used for both val and test
|
96 |
+
|
97 |
if split == "train":
|
98 |
data_to_use = train_data
|
99 |
else: # "val" or "test" split
|
100 |
data_to_use = val_test_data
|
101 |
+
|
102 |
for idx, (file, label) in enumerate(data_to_use):
|
103 |
yield idx, {
|
104 |
"image_file_path": file,
|
|
|
107 |
}
|
108 |
|
109 |
|
|