import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {hair-detection-and-segmentation},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
The dataset consists of images of people along with corresponding segmentation
masks of their hair. In order to facilitate hair detection and segmentation,
every hair region in the images is annotated with a mask that outlines its
boundary, marking its position and shape within the image. This allows for
accurate identification and extraction of the hair region.
Each image is accompanied by a collage that visualizes the image together with
its mask, and by shape annotations stored in a CSV file.
This dataset can be leveraged for hair detection and segmentation tasks in
computer vision.
"""

_NAME = "hair-detection-and-segmentation" |
|
|
|
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}" |
|
|
|
_LICENSE = "" |
|
|
|
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/" |
|
|
|
|
|
class HairDetectionAndSegmentation(datasets.GeneratorBasedBuilder):
    """Dataset builder for TrainingDataPro's hair detection and segmentation data."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "image": datasets.Image(),
                    "mask": datasets.Image(),
                    "collage": datasets.Image(),
                    "shapes": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the image, mask and collage archives plus the CSV annotations.
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        masks = dl_manager.download(f"{_DATA}masks.tar.gz")
        collages = dl_manager.download(f"{_DATA}collages.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")

        # iter_archive yields (path-inside-archive, file-object) pairs lazily.
        images = dl_manager.iter_archive(images)
        masks = dl_manager.iter_archive(masks)
        collages = dl_manager.iter_archive(collages)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": images,
                    "masks": masks,
                    "collages": collages,
                    "annotations": annotations,
                },
            ),
        ]

    def _generate_examples(self, images, masks, collages, annotations):
        annotations_df = pd.read_csv(annotations)

        # The three archives are assumed to list files in the same order, so the
        # corresponding image, mask and collage can simply be zipped together.
        for idx, (
            (image_path, image),
            (mask_path, mask),
            (collage_path, collage),
        ) in enumerate(zip(images, masks, collages)):
            # Select the CSV row describing the current image once and reuse it.
            row = annotations_df.loc[annotations_df["image_name"] == image_path]
            yield idx, {
                "id": row["image_id"].values[0],
                "image": {"path": image_path, "bytes": image.read()},
                "mask": {"path": mask_path, "bytes": mask.read()},
                "collage": {"path": collage_path, "bytes": collage.read()},
                "shapes": row["annotations"].values[0],
            }
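
# Example usage (a minimal sketch, assuming this script is published alongside
# the data files in the TrainingDataPro/hair-detection-and-segmentation repo on
# the Hugging Face Hub):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset(
#         "TrainingDataPro/hair-detection-and-segmentation", split="train"
#     )
#     sample = dataset[0]
#     sample["image"]   # decoded PIL image of the original photo
#     sample["mask"]    # decoded PIL image of the hair segmentation mask
#     sample["shapes"]  # raw annotation string taken from the CSV file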