import os
import datasets
#datasets.logging.set_verbosity_debug()
#datasets.logging.set_verbosity_info()
#logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
A segmentation dataset for [TODO: complete...]
"""
_HOMEPAGE = "https://huggingface.co/datasets/alkzar90/cell_benchmark"
_EXTENSION = [".jpg", ".png"]
_URL_BASE = "https://huggingface.co/datasets/alkzar90/cell_benchmark/resolve/main/data/"
# Image and mask archives for each split.
_SPLIT_URLS = {
    "train": _URL_BASE + "train.zip",
    "val": _URL_BASE + "val.zip",
    "test": _URL_BASE + "test.zip",
    "masks_train": _URL_BASE + "masks/train.zip",
    "masks_val": _URL_BASE + "masks/val.zip",
    "masks_test": _URL_BASE + "masks/test.zip",
}

class Cellsegmentation(datasets.GeneratorBasedBuilder):
    """Builder for the cell segmentation dataset: images paired with segmentation masks."""

    def _info(self):
        # Each example holds an input image and its segmentation mask.
        features = datasets.Features({
            "image": datasets.Image(),
            "masks": datasets.Image(),
            # "path": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("image", "masks"),
            homepage=_HOMEPAGE,
            citation="",
        )

    def _split_generators(self, dl_manager):
        # Download and extract every archive listed in _SPLIT_URLS, then wire
        # each image archive to its matching mask archive.
        data_files = dl_manager.download_and_extract(_SPLIT_URLS)
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["train"]]),
                    "masks": dl_manager.iter_files([data_files["masks_train"]]),
                    "split": "training",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["val"]]),
                    "masks": dl_manager.iter_files([data_files["masks_val"]]),
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["test"]]),
                    "masks": dl_manager.iter_files([data_files["masks_test"]]),
                    "split": "test",
                },
            ),
        ]
        return splits

    def _generate_examples(self, files, masks, split):
        # Pair each image file with its mask file; both iterators are assumed
        # to yield paths in the same order.
        for i, (image_path, mask_path) in enumerate(zip(files, masks)):
            yield i, {
                "image": image_path,
                "masks": mask_path,
            }