import os
import datasets

datasets.logging.set_verbosity_debug()
#datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """\
A segmentation dataset for [TODO: complete...]
"""


_HOMEPAGE = "https://huggingface.co/datasets/alkzar90/cell_benchmark"
_EXTENSION = [".jpg", ".png"]
_URL_BASE = "https://huggingface.co/datasets/alkzar90/cell_benchmark/resolve/main/data/"
_SPLIT_URLS = {
  "train":  _URL_BASE + "train.zip",
  "val":    _URL_BASE + "val.zip",
  "test":   _URL_BASE + "test.zip",
  "masks":  _URL_BASE + "masks.zip",
}



class Cellsegmentation(datasets.GeneratorBasedBuilder):
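  """Builder for the cell_benchmark segmentation dataset (image / mask pairs)."""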

  def _info(self):
    # Each example exposes the input image, its segmentation mask, and the image path.
    features = datasets.Features({
        "image": datasets.Image(),
        "masks": datasets.Image(),
        "path": datasets.Value("string"),
    })
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=features,
        supervised_keys=("image", "masks"),
        homepage=_HOMEPAGE,
        citation="",
    )


  def _split_generators(self, dl_manager):
    # Download and extract every split archive plus the shared masks archive.
    data_files = dl_manager.download_and_extract(_SPLIT_URLS)
    splits = [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "files": dl_manager.iter_files([data_files["train"]]),
                "masks": data_files["masks"],
                "split": "training",
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={
                "files": dl_manager.iter_files([data_files["val"]]),
                "masks": data_files["masks"],
                "split": "validation",
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={
                "files": dl_manager.iter_files([data_files["test"]]),
                "masks": data_files["masks"],
                "split": "test",
            },
        ),
    ]
    return splits


  def _generate_examples(self, files, masks, split):
    # `masks` is the extracted masks archive; each image <name>.jpg is assumed
    # to have a corresponding mask named mask_<name>.png inside it.
    for i, path in enumerate(files):
      file_name = os.path.basename(path)
      mask_file = os.path.join(masks, "mask_" + file_name.replace(".jpg", ".png"))
      yield i, {
          "image": path,
          "masks": mask_file,
          "path": path,
      }
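

# Usage sketch (the repo id "alkzar90/cell_benchmark" is taken from _HOMEPAGE;
# this is an assumed example, not part of the loading script itself):
#
#   from datasets import load_dataset
#   ds = load_dataset("alkzar90/cell_benchmark", split="train")
#   sample = ds[0]   # {"image": PIL image, "masks": PIL image, "path": str}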