import os
import json
import datasets
_DESCRIPTION = """\
CLEVR-Sudoku is a dataset for the task of Sudoku puzzle solving. It is a synthetic dataset generated using the CLEVR engine. The dataset consists of 3x3 Sudoku puzzles with varying levels of difficulty. The dataset is divided into three categories based on the number of known cells in the puzzle: Easy (K10), Medium (K30), and Hard (K50). Each puzzle is accompanied by a set of 10 possible solutions. The dataset is generated using the CLEVR engine and is available in the form of images and JSON files. The images are 256x256 pixels in size and are stored in the PNG format. The JSON files contain the puzzle, the solution, and the possible solutions in the form of a dictionary. The dataset is available for download in the form of a zip file. The dataset is intended for use in the development of machine learning models for the task of Sudoku puzzle solving.
"""
_CITATION = """\
@article{stammer2024neural,
title={Neural Concept Binder},
author={Stammer, Wolfgang and W{\"u}st, Antonia and Steinmann, David and Kersting, Kristian},
journal={Advances in Neural Information Processing Systems},
year={2024}
}"""
_HOME_PAGE = "https://ml-research.github.io/NeuralConceptBinder/"
# Use "resolve" (not "blob") so the URLs point at the raw files, which is what
# dl_manager needs for downloading.
_IMAGES_URL = "https://huggingface.co/datasets/AIML-TUDA/CLEVR-Sudoku/resolve/main"
_LICENSE = "cc-by-4.0"
_DIR = _IMAGES_URL
_URL_DATA = {
    "CLEVR-Easy-K10": [f"{_DIR}/CLEVR-Easy-Sudokus-K10.zip", f"{_DIR}/CLEVR-Easy-1.zip"],
    "CLEVR-Easy-K30": [f"{_DIR}/CLEVR-Easy-Sudokus-K30.zip", f"{_DIR}/CLEVR-Easy-1.zip"],
    "CLEVR-Easy-K50": [f"{_DIR}/CLEVR-Easy-Sudokus-K50.zip", f"{_DIR}/CLEVR-Easy-1.zip"],
    "CLEVR-4-K10": [f"{_DIR}/CLEVR-4-Sudokus-K10.zip", f"{_DIR}/sudoku.zip"],
    "CLEVR-4-K30": [f"{_DIR}/CLEVR-4-Sudokus-K30.zip", f"{_DIR}/sudoku.zip"],
    "CLEVR-4-K50": [f"{_DIR}/CLEVR-4-Sudokus-K50.zip", f"{_DIR}/sudoku.zip"],
}
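
# Usage sketch (assumes this script is hosted as the loading script of the
# AIML-TUDA/CLEVR-Sudoku dataset repository; the config names are the keys of
# _URL_DATA):
#
#   from datasets import load_dataset
#   ds = load_dataset("AIML-TUDA/CLEVR-Sudoku", "CLEVR-4-K30", trust_remote_code=True)
#   print(ds["train"][0])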
class CLEVRSudokuConfig(datasets.BuilderConfig):
    """Builder Config for CLEVR-Sudoku."""

    def __init__(self, data_url, image_url, **kwargs):
        """Builder Config for CLEVR-Sudoku.

        Args:
            data_url: URL of the zip archive with the Sudoku puzzles.
            image_url: URL of the zip archive with the corresponding images.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.image_url = image_url
        self.metadata_urls = {"train": data_url, "test": None}
class CLEVRSudoku(datasets.GeneratorBasedBuilder):
    """CLEVR-Sudoku dataset builder."""

    BUILDER_CONFIGS = [
        CLEVRSudokuConfig(
            name=name, description=name, data_url=urls[0], image_url=urls[1]
        )
        for name, urls in _URL_DATA.items()
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # TODO: add the sudoku (9x9 grid of cell images), options
                    # (9x10 images), attributes, and solution features once
                    # _generate_examples extracts them; see the sketch below.
                    "id": datasets.Value("int32"),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
            license=_LICENSE,
        )
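
    # A fuller feature schema (sketch, not yet wired in). Note that Array2D
    # takes a string dtype, not an Image feature, so per-cell images need
    # nested Sequence features; the shapes follow the TODO above:
    #
    #   features = datasets.Features(
    #       {
    #           "id": datasets.Value("int32"),
    #           "sudoku": datasets.Sequence(
    #               datasets.Sequence(datasets.Image(), length=9), length=9
    #           ),
    #           "options": datasets.Sequence(
    #               datasets.Sequence(datasets.Image(), length=10), length=9
    #           ),
    #           "attributes": {
    #               "key": datasets.Value("int32"),
    #               "value": datasets.Sequence(datasets.Value("string")),
    #           },
    #           "solution": datasets.Array2D(shape=(9, 9), dtype="int32"),
    #       }
    #   )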
    def _split_generators(self, dl_manager):
        # download_and_extract already unpacks zip archives and returns the
        # path of the extracted directory, so no manual unzipping is needed.
        # TODO: use image_dir once image features are added.
        image_dir = dl_manager.download_and_extract(self.config.image_url)
        archive_path = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path, "image_dir": image_dir},
            )
        ]
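
    # Sketch of a TEST split, preserving the intent of the original commented-out
    # code; metadata_urls currently lists no test archive ("test": None), so the
    # test_archive_path below is hypothetical:
    #
    #   datasets.SplitGenerator(
    #       name=datasets.Split.TEST,
    #       gen_kwargs={"archive_path": test_archive_path, "image_dir": image_dir},
    #   )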
    def _generate_examples(self, archive_path, image_dir):
        """Yields examples from the JSON files inside the extracted archive."""
        json_dir = os.path.join(archive_path, "json")
        for i, file_name in enumerate(sorted(os.listdir(json_dir))):
            if not file_name.endswith(".json"):
                continue
            # Read and parse the JSON content of one puzzle.
            with open(os.path.join(json_dir, file_name), "r") as f:
                json_content = json.load(f)
            # Extract the specific fields from the JSON.
            yield i, {
                "id": i,
                # TODO: extract further fields, e.g.
                # "solution": json_content.get("solution"),
            }
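
# Minimal local smoke test (sketch): builds one config end to end and prints
# the first example. Assumes network access to the Hub and a `datasets` version
# whose DatasetBuilder accepts the `config_name` keyword.
if __name__ == "__main__":
    builder = CLEVRSudoku(config_name="CLEVR-Easy-K10")
    builder.download_and_prepare()
    dataset = builder.as_dataset(split="train")
    print(dataset[0])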