import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {parking-space-detection-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
The dataset consists of images of parking spaces along with corresponding bounding box
masks. In order to facilitate object detection and localization, every parking space in
the images is annotated with a bounding box mask.
The bounding box mask outlines the boundary of the parking space, marking its position
and shape within the image. This allows for accurate identification and extraction of
individual parking spaces. Each parking spot is also labeled in accordance to its
occupancy: free, not free or partially free.
This dataset can be leveraged for a range of applications such as parking lot
management, autonomous vehicle navigation, smart city implementations, and traffic
analysis.
"""
_NAME = "parking-space-detection-dataset"

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = ""

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class ParkingSpaceDetectionDataset(datasets.GeneratorBasedBuilder):
    """Builder script for the TrainingDataPro parking space detection dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "image": datasets.Image(),
                    "mask": datasets.Image(),
                    "bboxes": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the image and mask archives plus the annotation CSV, then stream
        # the archives with iter_archive instead of extracting them to disk.
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        masks = dl_manager.download(f"{_DATA}boxes.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        images = dl_manager.iter_archive(images)
        masks = dl_manager.iter_archive(masks)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": images,
                    "masks": masks,
                    "annotations": annotations,
                },
            ),
        ]

    def _generate_examples(self, images, masks, annotations):
        annotations_df = pd.read_csv(annotations)

        # Walk the image and mask archives in parallel; each item is a
        # (path-inside-archive, file object) pair. The in-archive image path is used
        # to look up the image id and bounding-box string in the CSV.
        for idx, ((image_path, image), (mask_path, mask)) in enumerate(
            zip(images, masks)
        ):
            yield idx, {
                "id": annotations_df.loc[annotations_df["image_name"] == image_path][
                    "image_id"
                ].values[0],
                "image": {"path": image_path, "bytes": image.read()},
                "mask": {"path": mask_path, "bytes": mask.read()},
                "bboxes": annotations_df.loc[
                    annotations_df["image_name"] == image_path
                ]["annotations"].values[0],
            }
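

# A minimal usage sketch, not part of the builder API above. It assumes this script is
# hosted in the TrainingDataPro/parking-space-detection-dataset repository so that
# `datasets.load_dataset` resolves it by name; newer `datasets` releases may also
# require passing `trust_remote_code=True` to run a script-based dataset.
if __name__ == "__main__":
    dataset = datasets.load_dataset(f"TrainingDataPro/{_NAME}", split="train")
    sample = dataset[0]
    # "image" and "mask" decode to PIL images; "bboxes" is the raw annotation string.
    print(sample["id"], sample["image"].size, sample["mask"].size)
    print(sample["bboxes"])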