import os
import json

import datasets

_DESCRIPTION = """\
CLEVR-Sudoku is a synthetic dataset for the task of Sudoku puzzle solving, generated with the CLEVR engine. Each puzzle is a 9x9 Sudoku whose cells are filled with rendered CLEVR images instead of digits. The configurations combine two image variants (CLEVR-Easy and CLEVR-4) with three difficulty levels determined by the number of known cells in the puzzle (K10, K30, and K50). Each puzzle is accompanied by candidate images (options) for each digit and by its ground-truth solution. The images are 256x256 pixel PNGs; the accompanying JSON files contain the puzzle, its solution, and the options as dictionaries. The dataset is distributed as zip archives and is intended for developing machine learning models for Sudoku puzzle solving.
"""

_CITATION = """\
@article{stammer2024neural,
  title={Neural Concept Binder},
  author={Stammer, Wolfgang and W{\"u}st, Antonia and Steinmann, David and Kersting, Kristian},
  journal={Advances in Neural Information Processing Systems},
  year={2024}
}"""

_HOME_PAGE = "https://ml-research.github.io/NeuralConceptBinder/"
_IMAGES_URL = "https://huggingface.co/datasets/AIML-TUDA/CLEVR-Sudoku/resolve/main"
_LICENSE = "cc-by-4.0"
_DIR = _IMAGES_URL

_URL_DATA = {
    "CLEVR-Easy-K10": [f"{_DIR}/CLEVR-Easy-Sudokus-K10.zip", f"{_DIR}/CLEVR-Easy-1.zip"],
    "CLEVR-Easy-K30": [f"{_DIR}/CLEVR-Easy-Sudokus-K30.zip", f"{_DIR}/CLEVR-Easy-1.zip"],
    "CLEVR-Easy-K50": [f"{_DIR}/CLEVR-Easy-Sudokus-K50.zip", f"{_DIR}/CLEVR-Easy-1.zip"],
    "CLEVR-4-K10": [f"{_DIR}/CLEVR-4-Sudokus-K10.zip", f"{_DIR}/sudoku.zip"],
    "CLEVR-4-K30": [f"{_DIR}/CLEVR-4-Sudokus-K30.zip", f"{_DIR}/sudoku.zip"],
    "CLEVR-4-K50": [f"{_DIR}/CLEVR-4-Sudokus-K50.zip", f"{_DIR}/sudoku.zip"],
}
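
# Each Sudoku archive above is expected to extract to a single top-level
# directory containing a `json/` folder with one JSON file per puzzle; this is
# the layout that _generate_examples below relies on. Illustrative sketch
# (directory and file names are hypothetical):
#
#     CLEVR-Easy-Sudokus-K10/
#         json/
#             sudoku_0.json
#             sudoku_1.json
#             ...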


class CLEVRSudokuConfig(datasets.BuilderConfig):
    """Builder Config for CLEVR-Sudoku."""

    def __init__(self, data_url, image_url, **kwargs):
        """Builder Config for CLEVR-Sudoku.
        Args:
          data_url: URL of the zip archive containing the Sudoku puzzle JSON files.
          image_url: URL of the zip archive containing the CLEVR images.
          **kwargs: keyword arguments forwarded to super.
        """
        super(CLEVRSudokuConfig, self).__init__(
            version=datasets.Version("1.0.0"), **kwargs
        )
        self.data_url = data_url
        self.image_url = image_url
        self.metadata_urls = {"train": data_url, "test": None}


class CLEVRSudoku(datasets.GeneratorBasedBuilder):
    """Dataset builder for the CLEVR-Sudoku configurations."""

    BUILDER_CONFIGS = [
        CLEVRSudokuConfig(
            name=name, description=name, data_url=urls[0], image_url=urls[1]
        )
        for name, urls in _URL_DATA.items()
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # TODO: add the image features (the 9x9 grid of cell images
                    # and the per-digit option images) once they are wired into
                    # _generate_examples.
                    "file_name": datasets.Value("string"),
                    "id": datasets.Value("int32"),
                    "name": datasets.Value("string"),
                    "solution": datasets.Sequence(
                        datasets.Sequence(datasets.Value("int32"))
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
            license=_LICENSE,
        )
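
    # For illustration, one record produced by this builder looks roughly like
    # the following (values are hypothetical; the fields match what
    # _generate_examples yields):
    #
    #     {"file_name": "sudoku_0.json", "id": 0, "name": "...",
    #      "solution": [[...], ...]}  # "solution" is a 9x9 grid of ints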

    
    def _split_generators(self, dl_manager):
        # Download and extract the archive with the CLEVR images.
        # TODO: pass the extracted image directory into _generate_examples once
        # the image features are added.
        image_dir = dl_manager.download_and_extract(self.config.image_url)

        # Download and extract the archive with the Sudoku JSON files.
        archive_path = dl_manager.download_and_extract(self.config.data_url)

        # Define the dataset splits. Only a train split is generated for now.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path},
            )
        ]

    def _generate_examples(self, archive_path):
        """Yields examples from the extracted archive, reading the JSON files
        inside its `json/` folder."""
        # The zip extracts to a single top-level directory; ignore the
        # `__MACOSX` metadata folder that macOS-created archives may contain.
        dirs = [d for d in os.listdir(archive_path) if d != "__MACOSX"]
        top_dir = dirs[0]

        json_dir = os.path.join(archive_path, top_dir, "json")
        # Sort for a deterministic example order across runs.
        json_files = sorted(f for f in os.listdir(json_dir) if f.endswith(".json"))

        for i, file_name in enumerate(json_files):
            with open(os.path.join(json_dir, file_name), "r") as f:
                json_content = json.load(f)

            # Yield the fields of interest from the parsed JSON.
            yield i, {
                "file_name": file_name,
                "id": i,
                "name": json_content.get("name"),
                "solution": json_content.get("solution"),
            }
    
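
# A minimal smoke-test sketch: running this file directly builds one
# configuration from the local script path (assumes network access to the
# archive URLs and a `datasets` version that still supports script-based
# loading).
if __name__ == "__main__":
    from datasets import load_dataset

    # "CLEVR-Easy-K10" is one of the configs defined in _URL_DATA above.
    ds = load_dataset(__file__, "CLEVR-Easy-K10", trust_remote_code=True)
    print(ds["train"][0])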