# plantorgans / plantorgans.py
import datasets
import pandas as pd
import glob
from pathlib import Path
_DESCRIPTION = """Photos of various plants with their major, above-ground organs labeled. Includes labels for stems, leaves, fruits and flowers."""
_HOMEPAGE = "https://huggingface.co/datasets/jpodivin/plantorgans"
_CITATION = """"""
_LICENSE = "MIT"
_BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/"
_TRAIN_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(0, 8)]
_TEST_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(8, 12)]
_MASKS_URLS = [_BASE_URL + f"masks.tar.{i:02}" for i in range(0, 2)]
_SEMANTIC_MASKS_URLS = [_BASE_URL + f"semantic_masks.tar.{i:02}" for i in range(0, 2)]
_SEMANTIC_METADATA_URLS = {
    'train': _BASE_URL + 'metadata_semantic_train.csv',
    'test': _BASE_URL + 'metadata_semantic_test.csv',
}
_PANOPTIC_METADATA_URLS = {
    'train': _BASE_URL + 'metadata_train.csv',
    'test': _BASE_URL + 'metadata_test.csv',
}
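# The ranges above expand to shard names of the form:
#   sourcedata_labeled.tar.00 ... sourcedata_labeled.tar.07  (train images)
#   sourcedata_labeled.tar.08 ... sourcedata_labeled.tar.11  (test images)
#   masks.tar.00, masks.tar.01 and semantic_masks.tar.00, semantic_masks.tar.01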
class PlantOrgansConfig(datasets.BuilderConfig):
    """Builder Config for PlantOrgans."""

    def __init__(self, data_urls, metadata_urls, splits, **kwargs):
        """BuilderConfig for PlantOrgans.

        Args:
            data_urls: `string`, base URL the dataset archives are served from.
            metadata_urls: dictionary with keys 'train' and 'test' containing
                the metadata CSV URLs of the respective splits.
            splits: list of split names provided by this configuration.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_urls = data_urls
        self.metadata_urls = metadata_urls
        self.splits = splits

class PlantOrgans(datasets.GeneratorBasedBuilder):
    """PlantOrgans dataset."""

    BUILDER_CONFIGS = [
        PlantOrgansConfig(
            name="semantic_segmentation_full",
            description="Images paired with semantic segmentation masks.",
            data_urls=_BASE_URL,
            metadata_urls=_SEMANTIC_METADATA_URLS,
            splits=['train', 'test'],
        ),
        PlantOrgansConfig(
            name="instance_segmentation_full",
            description="Images paired with per-instance masks and label confidence scores.",
            data_urls=_BASE_URL,
            metadata_urls=_PANOPTIC_METADATA_URLS,
            splits=['train', 'test'],
        ),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "mask": datasets.Image(),
                "image_name": datasets.Value(dtype="string"),
                "class": datasets.ClassLabel(
                    names=['Fruit', 'Leaf', 'Flower', 'Stem']),
            })
        # The instance segmentation config additionally carries the
        # confidence score of each label.
        if self.config.name == 'instance_segmentation_full':
            features['score'] = datasets.Value(dtype="double")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("image", "mask"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        train_archives_paths = dl_manager.download_and_extract(_TRAIN_URLS)
        test_archives_paths = dl_manager.download_and_extract(_TEST_URLS)

        # Collect the extracted source images of each split.
        train_paths = []
        test_paths = []
        for p in train_archives_paths:
            train_paths.extend(glob.glob(str(p) + '/sourcedata/labeled/*.jpg'))
        for p in test_archives_paths:
            test_paths.extend(glob.glob(str(p) + '/sourcedata/labeled/*.jpg'))

        # Select the metadata CSVs and mask archives matching the chosen config.
        if self.config.name == 'instance_segmentation_full':
            metadata_urls = _PANOPTIC_METADATA_URLS
            mask_urls = _MASKS_URLS
            mask_glob = '/_masks/*.png'
        else:
            metadata_urls = _SEMANTIC_METADATA_URLS
            mask_urls = _SEMANTIC_MASKS_URLS
            mask_glob = '/semantic_masks/*.png'

        split_metadata_paths = dl_manager.download(metadata_urls)
        mask_archives_paths = dl_manager.download_and_extract(mask_urls)
        mask_paths = []
        for p in mask_archives_paths:
            mask_paths.extend(glob.glob(str(p) + mask_glob))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": train_paths,
                    "metadata_path": split_metadata_paths["train"],
                    "masks_path": mask_paths,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": test_paths,
                    "metadata_path": split_metadata_paths["test"],
                    "masks_path": mask_paths,
                },
            ),
        ]

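    # Note: both splits above receive the full list of mask paths; the inner
    # merges against the split's own metadata CSV below pair each image with
    # its mask, so rows belonging to the other split are simply dropped.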
    def _generate_examples(self, images, metadata_path, masks_path):
        """
        images: list of paths to the extracted source images
        metadata_path: path to the metadata CSV of the split
        masks_path: list of paths to the extracted masks
        """
        # Map the relative paths recorded in the metadata CSV to the local
        # paths of the extracted image and mask files.
        image_paths = pd.DataFrame(
            [(str(Path(*Path(e).parts[-3:])), e) for e in images],
            columns=['image', 'image_path'])
        masks_paths = pd.DataFrame(
            [(str(Path(*Path(e).parts[-2:])), e) for e in masks_path],
            columns=['mask', 'mask_path'])

        # Read the image/mask metadata from the CSV and keep only rows for
        # which both the image and the mask are present locally.
        metadata = pd.read_csv(metadata_path)
        metadata = metadata.merge(masks_paths, on='mask', how='inner')
        metadata = metadata.merge(image_paths, on='image', how='inner')

        # Each example holds the paths to the mask and the source image, the
        # name of the source image, and the organ class; the instance
        # segmentation config also carries the label confidence score.
        for i, r in metadata.iterrows():
            example = {
                'mask': r['mask_path'],
                'image': r['image_path'],
                'image_name': Path(r['image_path']).parts[-1],
                'class': r['class'],
            }
            if self.config.name == 'instance_segmentation_full':
                example['score'] = r['score']
            yield i, example
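

# A minimal usage sketch (an illustration, not part of the loading script):
# it assumes network access and a `datasets` release that still supports
# script-based datasets; recent 2.x versions also require
# `trust_remote_code=True`. This block is not executed when the script is
# imported by the `datasets` library.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(
        "jpodivin/plantorgans",
        "semantic_segmentation_full",
        trust_remote_code=True,
    )
    print(ds)  # DatasetDict with 'train' and 'test' splits
    example = ds["train"][0]
    print(example["image_name"], example["class"])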