import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {face_segmentation},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
An example of a dataset that we've collected for a photo-editing app.
The dataset includes 20 selfies of people (men and women)
with segmentation masks and their visualisations.
"""

_NAME = "face_segmentation"
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
_LICENSE = ""
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class FaceSegmentation(datasets.GeneratorBasedBuilder):
    """Selfies paired with face segmentation masks and per-image metadata."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "mask": datasets.Image(),
                "id": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "age": datasets.Value("int8")
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
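        # Download the image/mask archives and the metadata CSV. The archives
        # are not extracted: `iter_archive` below yields (path, file object)
        # pairs lazily, which keeps the builder streaming-friendly.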
images = dl_manager.download(f"{_DATA}images.tar.gz")
masks = dl_manager.download(f"{_DATA}masks.tar.gz")
annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
images = dl_manager.iter_archive(images)
masks = dl_manager.iter_archive(masks)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "images": images,
                                        "masks": masks,
                                        "annotations": annotations
                                    }),
        ]

    def _generate_examples(self, images, masks, annotations):
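        # Metadata rows are matched to archive members purely by position:
        # this assumes the CSV rows, the images archive and the masks archive
        # are all stored in the same order.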
        annotations_df = pd.read_csv(annotations, sep=";")
        for idx, ((image_path, image),
                  (mask_path, mask)) in enumerate(zip(images, masks)):
            yield idx, {
                "image": {
                    "path": image_path,
                    "bytes": image.read()
                },
                "mask": {
                    "path": mask_path,
                    "bytes": mask.read()
                },
                "id": annotations_df["id"].iloc[idx],
                "gender": annotations_df["gender"].iloc[idx],
                "age": annotations_df["age"].iloc[idx]
            }