toniwuest committed fe900f8 (verified) · 1 parent: eadedd0

Update CLEVR-Sudoku.py

Files changed (1):
  CLEVR-Sudoku.py (+9, -34)

CLEVR-Sudoku.py CHANGED
@@ -123,7 +123,7 @@ class CLEVRSudoku(datasets.GeneratorBasedBuilder):
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"archive_path": archive_path, "dl_manager": dl_manager}
+                gen_kwargs={"archive_path": archive_path, "image_path": image_path, "dl_manager": dl_manager}
             )
         ]
 
@@ -150,7 +150,7 @@ class CLEVRSudoku(datasets.GeneratorBasedBuilder):
         # ),
         # ]
 
-    def _generate_examples(self, archive_path, dl_manager):
+    def _generate_examples(self, archive_path, image_path, dl_manager):
         """Yields examples from the archive, assuming JSON files inside a 'json' folder."""
         print("archive path in generate examples")
         print(type(archive_path))
@@ -175,9 +175,14 @@ class CLEVRSudoku(datasets.GeneratorBasedBuilder):
                     # Read and parse the JSON content
                     json_content = json.load(f)
 
+                    # sudoku image paths
+                    sudoku_images = json_content.get("images")
+                    sudoku_images = [os.path.join(image_path, x) if x is not None else None for row in sudoku_images for x in row]
+                    sudoku_images = [sudoku_images_2[i:i+9] for i in range(0, len(sudoku_images_2), 9)]
+
                     # Extract the specific fields from the JSON
                     extracted_data = {
-                        "sudoku": json_content.get("images"),
+                        "sudoku": sudoku_images,
                         # "options": json_content.get("options"),
                         # "attributes": json_content.get("map_number_to_attributes"),
                         "file_name": file_name,  # The name of the file inside the archive
@@ -189,34 +194,4 @@ class CLEVRSudoku(datasets.GeneratorBasedBuilder):
                     # Yield the extracted data
                     yield i, extracted_data
 
-        # # This uses `dl_manager.iter_archive` to iterate over files in the archive
-        # with open(archive_path, "rb") as f:
-        #     for i, (file_name, file_handle) in enumerate(dl_manager.iter_archive(f)):
-        #         # Only process files that are in the 'json/' folder and end with '.json'
-        #         if file_name.startswith("json/") and file_name.endswith(".json"):
-
-        #             print(i, file_handle)
-
-        #             # Read and parse the JSON content
-        #             json_content = json.load(file_handle)
-
-        #             # Extract the specific fields from the JSON
-        #             extracted_data = {
-        #                 # "file_name": file_name,  # The name of the file inside the archive
-        #                 "id": i,  # Extract the 'id' field
-        #                 # "name": json_content.get("name"),  # Extract the 'name' field
-        #                 # "solution": json_content.get("solution")  # Extract the 'puzzle' field
-        #             }
-
-        #             # Yield the extracted data
-        #             yield i, extracted_data
-
-        # for i, (sudoku, opt, label, attr) in enumerate(
-        #     zip(sudokus, options, labels, attributes)
-        # ):
-        #     yield i, {
-        #         "sudoku": sudoku,
-        #         "options": opt,
-        #         "label": label,
-        #         "attributes": attr,
-        #     }
+
 
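Note on the first two hunks: in a datasets.GeneratorBasedBuilder, every key placed in a SplitGenerator's gen_kwargs is forwarded as a keyword argument to _generate_examples, so adding "image_path" to gen_kwargs and adding the matching image_path parameter to the method signature are two halves of the same change. The sketch below shows that plumbing in isolation; it is not the repository's actual loading script, and _ARCHIVE_URL / _IMAGE_URL are placeholder values.

    import json

    import datasets

    # Placeholder URLs, not taken from the CLEVR-Sudoku repository.
    _ARCHIVE_URL = "https://example.com/sudokus.tar"
    _IMAGE_URL = "https://example.com/images.zip"


    class SketchBuilder(datasets.GeneratorBasedBuilder):
        """Minimal builder showing how gen_kwargs reach _generate_examples."""

        VERSION = datasets.Version("1.0.0")

        def _info(self):
            return datasets.DatasetInfo(
                features=datasets.Features({"file_name": datasets.Value("string")})
            )

        def _split_generators(self, dl_manager):
            archive_path = dl_manager.download(_ARCHIVE_URL)
            image_path = dl_manager.download_and_extract(_IMAGE_URL)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # Every key in this dict becomes a keyword argument of
                    # _generate_examples below.
                    gen_kwargs={
                        "archive_path": archive_path,
                        "image_path": image_path,
                        "dl_manager": dl_manager,
                    },
                )
            ]

        def _generate_examples(self, archive_path, image_path, dl_manager):
            # image_path would be used to resolve per-cell image files;
            # it is only received here to mirror the committed signature.
            # Iterate over the archive without unpacking it to disk.
            with open(archive_path, "rb") as f:
                for i, (file_name, file_obj) in enumerate(dl_manager.iter_archive(f)):
                    if file_name.startswith("json/") and file_name.endswith(".json"):
                        json.load(file_obj)  # parsed content unused in this sketch
                        yield i, {"file_name": file_name}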
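Note on the third hunk: the new "sudoku image paths" block flattens the nested images list from the JSON file, joins each entry onto image_path, and regroups the flat list into rows of nine, i.e. a 9x9 Sudoku grid of per-cell image paths that replaces the raw json_content.get("images") value in the "sudoku" field. As committed, the regrouping line refers to sudoku_images_2, which is not defined anywhere in the hunk; the standalone sketch below assumes it is meant to be sudoku_images and uses dummy data to show the intended reshaping.

    import os

    # Dummy stand-ins for image_path and json_content.get("images"); the real
    # values come from the dataset archive. Assumes the committed name
    # sudoku_images_2 is a typo for sudoku_images.
    image_path = "images/sudoku_0000"
    images = [[f"cell_{r}_{c}.png" if (r + c) % 2 == 0 else None for c in range(9)]
              for r in range(9)]

    # Flatten the nested list and prefix every non-empty cell with image_path.
    sudoku_images = [os.path.join(image_path, x) if x is not None else None
                     for row in images for x in row]

    # Regroup the flat list into rows of nine, restoring the 9x9 layout.
    sudoku_images = [sudoku_images[i:i + 9] for i in range(0, len(sudoku_images), 9)]

    assert len(sudoku_images) == 9 and all(len(row) == 9 for row in sudoku_images)

With sudoku_images_2 renamed to sudoku_images, the committed hunk performs exactly this reshaping before storing the grid under the "sudoku" key.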