Update CLEVR-Sudoku.py
CLEVR-Sudoku.py  CHANGED  (+10 -59)
@@ -66,12 +66,17 @@ class CLEVRSudoku(datasets.GeneratorBasedBuilder):
             {
                 "sudoku": datasets.Sequence(datasets.Sequence(datasets.Image())),
                 "options": datasets.Sequence(datasets.Sequence(datasets.Image())),
-                #
+                # attributes as dict of features
                 "attributes": {
-                    "key": datasets.Value("int32"),
-                    "value": datasets.Sequence(datasets.Value("string")),
                     "1": datasets.Sequence(datasets.Value("string")),
-                    "2": datasets.Sequence(datasets.Value("string"))
+                    "2": datasets.Sequence(datasets.Value("string")),
+                    "3": datasets.Sequence(datasets.Value("string")),
+                    "4": datasets.Sequence(datasets.Value("string")),
+                    "5": datasets.Sequence(datasets.Value("string")),
+                    "6": datasets.Sequence(datasets.Value("string")),
+                    "7": datasets.Sequence(datasets.Value("string")),
+                    "8": datasets.Sequence(datasets.Value("string")),
+                    "9": datasets.Sequence(datasets.Value("string")),
                 },
                 "id": datasets.Value("int32"),
                 "solution": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
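This hunk drops the old "key"/"value" pair in "attributes" and declares one string sequence per Sudoku digit instead. A minimal sketch of the resulting schema, assuming the enclosing datasets.Features(...) wrapper that is not visible in the diff:

import datasets

# Sketch only: the Features(...) call is assumed; field names come from the hunk above.
# Each digit "1"-"9" maps to a list of attribute strings.
features = datasets.Features(
    {
        "sudoku": datasets.Sequence(datasets.Sequence(datasets.Image())),
        "options": datasets.Sequence(datasets.Sequence(datasets.Image())),
        # attributes as dict of features
        "attributes": {
            str(d): datasets.Sequence(datasets.Value("string")) for d in range(1, 10)
        },
        "id": datasets.Value("int32"),
        "solution": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
    }
)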
@@ -86,39 +91,12 @@ class CLEVRSudoku(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
 
-        #
+        # Download and extract images
         image_path = dl_manager.download_and_extract(self.config.image_url)
 
-        # archive_path, sudokus, options, labels, attributes = self.get_data(
-        #     dl_manager, self.config.data_url, self.config.image_url
-        # )
-
         # Download and extract the dataset archive
         archive_path = dl_manager.download_and_extract(self.config.data_url)
 
-        files = os.listdir(image_path)
-        print(files)
-
-        dirs = [d for d in os.listdir(image_path) if d != "__MACOSX"]
-        top_dir = dirs[0]
-
-        print(os.listdir(os.path.join(image_path, top_dir)))
-
-
-        # unzip file
-        # import zipfile
-        # with zipfile.ZipFile(archive_path,"r") as zip_ref:
-        #     unzipped_path = archive_path.split('.')[0]
-        #     print(unzipped_path)
-        #     zip_ref.extractall(unzipped_path)
-
-        files = os.listdir(archive_path)
-        print(files)
-        # archive_path = self.config.data_url
-
-        print("archive path in split generators")
-        print(type(archive_path))
-
         # Define the dataset splits
         return [
             datasets.SplitGenerator(
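With the debug prints and the commented-out get_data call removed, _split_generators reduces to the two downloads and a single split. A hedged sketch of the full method after this commit; the SplitGenerator arguments sit outside the hunk, so the name and gen_kwargs below are inferred from the _generate_examples signature rather than copied from the file:

def _split_generators(self, dl_manager):
    # Download and extract images
    image_path = dl_manager.download_and_extract(self.config.image_url)

    # Download and extract the dataset archive
    archive_path = dl_manager.download_and_extract(self.config.data_url)

    # Define the dataset splits (split name and gen_kwargs are assumptions)
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "archive_path": archive_path,
                "image_path": image_path,
                "dl_manager": dl_manager,
            },
        )
    ]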
@@ -127,35 +105,9 @@ class CLEVRSudoku(datasets.GeneratorBasedBuilder):
             )
         ]
 
-        # return [
-        #     datasets.SplitGenerator(
-        #         name=datasets.Split.TRAIN,
-        #         gen_kwargs={
-        #             "image_dir": meta_data_path,
-        #             "sudokus": sudokus,
-        #             "options": options,
-        #             "labels": labels,
-        #             "attributes": attributes,
-        #         },
-        #     ),
-        #     datasets.SplitGenerator(
-        #         name=datasets.Split.TEST,
-        #         gen_kwargs={
-        #             "image_dir": meta_data_path,
-        #             "sudokus": sudokus,
-        #             "options": options,
-        #             "labels": labels,
-        #             "attributes": attributes,
-        #         },
-        #     ),
-        # ]
 
     def _generate_examples(self, archive_path, image_path, dl_manager):
         """Yields examples from the archive, assuming JSON files inside a 'json' folder."""
-        print("archive path in generate examples")
-        print(type(archive_path))
-
-        print("generating examples from ", archive_path)
 
         # list items in archive path
         print(os.listdir(archive_path))
@@ -165,7 +117,6 @@ class CLEVRSudoku(datasets.GeneratorBasedBuilder):
 
         json_dir = os.path.join(archive_path, top_dir, "json")
         json_files = os.listdir(json_dir)
-        print(json_files)
 
         for i, file_name in enumerate(json_files):
 
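The last two hunks only strip leftover debug output from _generate_examples; the directory handling and the yield logic sit outside the diff. A sketch of how the surviving loop could produce examples, where the top_dir computation, the JSON parsing, and the yielded record are assumptions, not code from this commit:

import json
import os

def _generate_examples(self, archive_path, image_path, dl_manager):
    """Yields examples from the archive, assuming JSON files inside a 'json' folder."""

    # list items in archive path
    print(os.listdir(archive_path))

    # top_dir is set between the hunks; skipping macOS metadata here is an assumption
    top_dir = [d for d in os.listdir(archive_path) if d != "__MACOSX"][0]

    json_dir = os.path.join(archive_path, top_dir, "json")
    json_files = os.listdir(json_dir)

    for i, file_name in enumerate(json_files):
        # Hypothetical parsing step: the real script builds each example from the
        # JSON contents plus the extracted images under image_path.
        with open(os.path.join(json_dir, file_name)) as f:
            record = json.load(f)
        yield i, record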
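For completeness, a hypothetical way to exercise the updated loading script; the repository id below is a placeholder and not part of this commit:

from datasets import load_dataset

# Placeholder repo id; trust_remote_code is required for script-based datasets.
ds = load_dataset("user/CLEVR-Sudoku", trust_remote_code=True)
print(ds)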