Datasets:

File size: 6,924 Bytes
71f6c3a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e82987a
71f6c3a
 
 
 
5032e84
7196e1f
5032e84
 
 
 
71f6c3a
 
 
 
 
 
3642550
71f6c3a
 
 
 
 
 
 
 
 
 
 
 
3642550
71f6c3a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0c6b749
f17dacf
fef132c
f17dacf
9d08ec2
fef132c
 
 
 
 
 
 
 
f17dacf
9b49a48
c636967
71f6c3a
 
e8837b2
71f6c3a
 
 
 
 
6de8d90
71f6c3a
 
fef132c
ec4b51f
71f6c3a
067c9b0
db23d1c
b6b0d41
067c9b0
71f6c3a
 
 
fe900f8
067c9b0
71f6c3a
067c9b0
 
fe900f8
067c9b0
28e6277
03375da
 
 
6e667c1
 
 
 
03375da
9a68693
30ea15c
b587005
30ea15c
fbb59b7
30ea15c
fbb59b7
 
b587005
fe900f8
 
 
b3f5cd9
f17dacf
 
 
 
 
 
 
b587005
 
fe900f8
f17dacf
 
30ea15c
b587005
4c81770
 
b587005
 
 
 
15a6813
fe900f8
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
import os
import json

import datasets

# Human-readable dataset card description (shown on the Hub).
_DESCRIPTION = """\
CLEVR-Sudoku is a dataset for the task of Sudoku puzzle solving. It is a synthetic dataset generated using the CLEVR engine. The dataset consists of 3x3 Sudoku puzzles with varying levels of difficulty. The dataset is divided into three categories based on the number of known cells in the puzzle: Easy (K10), Medium (K30), and Hard (K50). Each puzzle is accompanied by a set of 10 possible solutions. The dataset is generated using the CLEVR engine and is available in the form of images and JSON files. The images are 256x256 pixels in size and are stored in the PNG format. The JSON files contain the puzzle, the solution, and the possible solutions in the form of a dictionary. The dataset is available for download in the form of a zip file. The dataset is intended for use in the development of machine learning models for the task of Sudoku puzzle solving.
"""

# BibTeX citation for the originating paper.
_CITATION = """\
@article{stammer2024neural,
  title={Neural Concept Binder},
  author={Stammer, Wolfgang and W{\"u}st, Antonia and Steinmann, David and Kersting, Kristian},
  journal={Advances in Neural Information Processing Systems},
  year={2024}
}"""

_HOME_PAGE = "https://ml-research.github.io/NeuralConceptBinder/"
# Base URL of the dataset repository on the Hugging Face Hub.
_IMAGES_URL = "https://huggingface.co/datasets/AIML-TUDA/CLEVR-Sudoku/resolve/main"
_LICENSE = "cc-by-4.0"
_DIR = _IMAGES_URL

# Per-config download URLs: [sudoku-metadata archive, cell-image archive].
# K10/K30/K50 is the number of known cells per puzzle; all CLEVR-Easy configs
# share one image archive, all CLEVR-4 configs share another.
_URL_DATA = {
    "CLEVR-Easy-K10": [f"{_DIR}/CLEVR-Easy-Sudokus-K10.zip", f"{_DIR}/CLEVR-Easy-1.zip"],
    "CLEVR-Easy-K30": [f"{_DIR}/CLEVR-Easy-Sudokus-K30.zip", f"{_DIR}/CLEVR-Easy-1.zip"],
    "CLEVR-Easy-K50": [f"{_DIR}/CLEVR-Easy-Sudokus-K50.zip", f"{_DIR}/CLEVR-Easy-1.zip"],
    "CLEVR-4-K10": [f"{_DIR}/CLEVR-4-Sudokus-K10.zip", f"{_DIR}/sudoku.zip"],
    "CLEVR-4-K30": [f"{_DIR}/CLEVR-4-Sudokus-K30.zip", f"{_DIR}/sudoku.zip"],
    "CLEVR-4-K50": [f"{_DIR}/CLEVR-4-Sudokus-K50.zip", f"{_DIR}/sudoku.zip"],
}


class CLEVRSudokuConfig(datasets.BuilderConfig):
    """Builder Config for CLEVR-Sudoku."""

    def __init__(self, data_url, image_url, **kwargs):
        """Builder Config for CLEVR-Sudoku.

        Args:
          data_url: URL of the zip archive holding the per-puzzle JSON metadata.
          image_url: URL of the zip archive holding the CLEVR cell images.
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.image_url = image_url
        # Only a train split is shipped; kept as a dict for forward compatibility.
        self.metadata_urls = {"train": data_url, "test": None}


class CLEVRSudoku(datasets.GeneratorBasedBuilder):
    """Dataset builder for CLEVR-Sudoku: Sudoku puzzles whose cells are CLEVR images."""

    BUILDER_CONFIGS = [
        CLEVRSudokuConfig(
            name=name, description=name, data_url=urls[0], image_url=urls[1]
        )
        for name, urls in _URL_DATA.items()
    ]

    def _info(self):
        """Return static dataset metadata (features, citation, license, homepage)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # 9x9 grid of cell images (None for unknown cells).
                    "sudoku": datasets.Sequence(datasets.Sequence(datasets.Image())),
                    # Candidate images, one row per digit.
                    "options": datasets.Sequence(datasets.Sequence(datasets.Image())),
                    # Mapping from digit key ("1".."9") to its list of CLEVR attributes.
                    "attributes": {
                        str(digit): datasets.Sequence(datasets.Value("string"))
                        for digit in range(1, 10)
                    },
                    "id": datasets.Value("int32"),
                    # 9x9 grid of ground-truth digits.
                    "solution": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download and extract both archives; the dataset only ships a train split."""
        # Archive with the per-cell CLEVR images.
        image_path = dl_manager.download_and_extract(self.config.image_url)
        # Archive with the per-puzzle JSON metadata.
        archive_path = dl_manager.download_and_extract(self.config.data_url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "archive_path": archive_path,
                    "image_path": image_path,
                    # Kept for signature compatibility; unused in _generate_examples.
                    "dl_manager": dl_manager,
                },
            )
        ]

    def _generate_examples(self, archive_path, image_path, dl_manager):
        """Yield (id, example) pairs from the JSON files inside the extracted archive.

        Args:
          archive_path: root directory of the extracted metadata archive; it
            contains one top-level folder with a 'json' subfolder.
          image_path: root directory of the extracted image archive; image
            paths inside the JSON files are relative to it.
          dl_manager: unused, kept for gen_kwargs compatibility.
        """
        # Skip macOS resource-fork folders that zip extraction may leave behind.
        top_dirs = [d for d in os.listdir(archive_path) if d != "__MACOSX"]
        json_dir = os.path.join(archive_path, top_dirs[0], "json")

        # Sort for deterministic example order and ids (os.listdir order is
        # filesystem-dependent); previously the order was arbitrary.
        json_files = sorted(os.listdir(json_dir))

        for i, file_name in enumerate(json_files):
            with open(os.path.join(json_dir, file_name), "r") as f:
                json_content = json.load(f)

            # Resolve sudoku cell image paths (keep None for unknown cells),
            # then re-chunk the flat list into 9 rows of 9 cells.
            sudoku_images = [
                os.path.join(image_path, x) if x is not None else None
                for row in json_content.get("images")
                for x in row
            ]
            sudoku_images = [sudoku_images[j:j + 9] for j in range(0, len(sudoku_images), 9)]

            # Resolve option image paths and re-chunk using the width of the
            # first row (all rows are assumed equally wide — TODO confirm).
            options = json_content.get("options")
            max_options = len(options[0])
            option_images = [
                os.path.join(image_path, x) if x is not None else None
                for row in options
                for x in row
            ]
            option_images = [
                option_images[j:j + max_options]
                for j in range(0, len(option_images), max_options)
            ]

            # NOTE(review): "file_name" and "name" are not declared in the
            # _info() features; depending on the datasets version they may be
            # dropped or rejected at encode time — confirm whether they should
            # be added to the features dict.
            yield i, {
                "sudoku": sudoku_images,
                "options": option_images,
                "attributes": json_content.get("map_number_to_attributes"),
                "file_name": file_name,        # The name of the file inside the archive
                "id": i,
                "name": json_content.get("name"),
                "solution": json_content.get("solution"),
            }