Datasets:

ArXiv:
DOI:
License:
SiddiqueAkhonda committed on
Commit
a64db7a
1 Parent(s): 7dd58d2

Upload msynth.py

Browse files
Files changed (1) hide show
  1. msynth.py +398 -0
msynth.py ADDED
@@ -0,0 +1,398 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 for msynth dataset
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ '''
15
+ Custom dataset-builder for msynth dataset
16
+ '''
17
+
18
+ import os
19
+ import datasets
20
+ import glob
21
+ import re
22
+
23
+ logger = datasets.logging.get_logger(__name__)
24
+
25
+ _CITATION = """\
26
+ @article{sizikova2023knowledge,
27
+ title={Knowledge-based in silico models and dataset for the comparative evaluation of mammography AI for a range of breast characteristics, lesion conspicuities and doses},
28
+ author={Sizikova, Elena and Saharkhiz, Niloufar and Sharma, Diksha and Lago, Miguel and Sahiner, Berkman and Delfino, Jana G. and Badano, Aldo},
29
+ journal={Advances in Neural Information Processing Systems},
30
+ volume={},
31
+ pages={16764--16778},
32
+ year={2023}
33
+ """
34
+
35
+
36
+ _DESCRIPTION = """\
37
+ M-SYNTH is a synthetic digital mammography (DM) dataset with four breast fibroglandular density distributions imaged using Monte Carlo x-ray simulations with the publicly available Virtual Imaging Clinical Trial for Regulatory Evaluation (VICTRE) toolkit.
38
+ Curated by: Elena Sizikova, Niloufar Saharkhiz, Diksha Sharma, Miguel Lago, Berkman Sahiner, Jana Gut Delfino, Aldo Badano
39
+ License: Creative Commons 1.0 Universal License (CC0)
40
+ """
41
+
42
+
43
+ _HOMEPAGE = "link to the dataset description page (FDA/CDRH/OSEL/DIDSR/VICTRE_project)"
44
+
45
+ _REPO = "https://huggingface.co/datasets/didsr/msynth/resolve/main/data"
46
+
47
+ # satting parameters for the URLS
48
+ _LESIONDENSITY = ["1.0","1.06", "1.1"]
49
+ _DOSE = ["20%","40%","60%","80%","100%"]
50
+ _DENSITY = ["fatty", "dense", "hetero","scattered"]
51
+ _SIZE = ["5.0","7.0", "9.0"]
52
+ _DETECTOR = 'SIM'
53
+
54
+ _DOSETABLE = {
55
+ "dense": {
56
+ "20%": '1.73e09',
57
+ "40%": '3.47e09',
58
+ "60%": '5.20e09',
59
+ "80%": '6.94e09',
60
+ "100%": '8.67e09'
61
+ },
62
+ "hetero": {
63
+ "20%": '2.04e09',
64
+ "40%": '4.08e09',
65
+ "60%": '6.12e09',
66
+ "80%": '8.16e09',
67
+ "100%": '1.02e10'
68
+ },
69
+ "scattered": {
70
+ "20%": '4.08e09',
71
+ "40%": '8.16e09',
72
+ "60%": '1.22e10',
73
+ "80%": '1.63e10',
74
+ "100%": '2.04e10'
75
+ },
76
+ "fatty": {
77
+ "20%": '4.44e09',
78
+ "40%": '8.88e09',
79
+ "60%": '1.33e10',
80
+ "80%": '1.78e10',
81
+ "100%": '2.22e10'
82
+ }
83
+ }
84
+ # Links to download readme files
85
+ _URLS = {
86
+ "meta_data": f"{_REPO}/metadata/bounds.zip",
87
+ "read_me": f"{_REPO}/README.md"
88
+ }
89
+
90
+
91
+
92
+ # Define the labels or classes in your dataset
93
+ #_NAMES = ["raw", "mhd", "dicom", "loc"]
94
+
95
+ DATA_DIR = {"all_data": "SIM", "seg": "SIM", "info": "bounds"}
96
+
97
+ class msynthConfig(datasets.BuilderConfig):
98
+ """msynth dataset"""
99
+ lesion_density = _LESIONDENSITY
100
+ dose = _DOSE
101
+ density = _DENSITY
102
+ size = _SIZE
103
+ def __init__(self, name, **kwargs):
104
+ super(msynthConfig, self).__init__(
105
+ version=datasets.Version("1.0.0"),
106
+ name=name,
107
+ description="msynth",
108
+ **kwargs,
109
+ )
110
+
111
+ class msynth(datasets.GeneratorBasedBuilder):
112
+ """msynth dataset."""
113
+
114
+ DEFAULT_WRITER_BATCH_SIZE = 256
115
+ BUILDER_CONFIGS = [
116
+ msynthConfig("device-data"),
117
+ msynthConfig("segmentation-mask"),
118
+ msynthConfig("metadata"),
119
+ ]
120
+
121
+ def _info(self):
122
+ if self.config.name == "device-data":
123
+ # Define dataset features and keys
124
+ features = datasets.Features(
125
+ {
126
+ "Raw": datasets.Value("string"),
127
+ "mhd": datasets.Value("string"),
128
+ "loc": datasets.Value("string"),
129
+ "dcm": datasets.Value("string"),
130
+ "density": datasets.Value("string"),
131
+ "mass_radius": datasets.Value("float32")
132
+ }
133
+ )
134
+ #keys = ("image", "metadata")
135
+ elif self.config.name == "segmentation-mask":
136
+ # Define features and keys
137
+ features = datasets.Features(
138
+ {
139
+ "Raw": datasets.Value("string"),
140
+ "mhd": datasets.Value("string"),
141
+ "loc": datasets.Value("string"),
142
+ "density": datasets.Value("string"),
143
+ "mass_radius": datasets.Value("float32")
144
+ }
145
+ )
146
+
147
+ elif self.config.name == "metadata":
148
+ # Define features and keys
149
+ features = datasets.Features(
150
+ {
151
+ "fatty": datasets.Value("string"),
152
+ "dense": datasets.Value("string"),
153
+ "hetero": datasets.Value("string"),
154
+ "scattered": datasets.Value("string")
155
+ }
156
+ )
157
+
158
+ return datasets.DatasetInfo(
159
+ description=_DESCRIPTION,
160
+ features=features,
161
+ supervised_keys=None,
162
+ homepage=_HOMEPAGE,
163
+ citation=_CITATION,
164
+ )
165
+
166
+ def _split_generators(
167
+ self, dl_manager: datasets.utils.download_manager.DownloadManager):
168
+ # Setting up the **config_kwargs parameters
169
+ if self.config.lesion_density == "all":
170
+ self.config.lesion_density = _LESIONDENSITY
171
+
172
+ if self.config.dose == "all":
173
+ self.config.dose = _DOSE
174
+
175
+ if self.config.density == "all":
176
+ self.config.density = _DENSITY
177
+
178
+ if self.config.size == "all":
179
+ self.config.size = _SIZE
180
+
181
+
182
+ if self.config.name == "device-data":
183
+ file_name = []
184
+ for ld in self.config.lesion_density:
185
+ for ds in self.config.dose:
186
+ for den in self.config.density:
187
+ value = _DOSETABLE[den][ds]
188
+ for sz in self.config.size:
189
+ temp_name = []
190
+ temp_name = (
191
+ "device_data_VICTREPhantoms_spic_"
192
+ + ld
193
+ + "/"
194
+ + value
195
+ + "/"
196
+ + den
197
+ + "/2/"
198
+ + sz
199
+ + "/"
200
+ + _DETECTOR
201
+ + ".zip"
202
+ )
203
+ file_name.append(_REPO +"/"+ temp_name)
204
+
205
+ # Downloading the data files
206
+ # data_dir = dl_manager.download_and_extract(file_name)
207
+ data_dir = []
208
+ for url in file_name:
209
+ try:
210
+ temp_down_file = []
211
+ # Attempt to download the file
212
+ temp_down_file = dl_manager.download_and_extract(url)
213
+ data_dir.append(temp_down_file)
214
+
215
+ except Exception as e:
216
+ # If an exception occurs (e.g., file not found), log the error and add the URL to the failed_urls list
217
+ logger.error(f"Failed to download {url}: {e}")
218
+
219
+ return [
220
+ datasets.SplitGenerator(
221
+ name="device-data",
222
+ gen_kwargs={
223
+ "files": [data_dir_t for data_dir_t in data_dir],
224
+ "name": "all_data",
225
+ },
226
+ ),
227
+ ]
228
+
229
+ elif self.config.name == "segmentation-mask":
230
+ seg_file_name = []
231
+ for den in self.config.density:
232
+ for sz in self.config.size:
233
+ temp_name = []
234
+ temp_name = (
235
+ "segmentation_masks"
236
+ + "/"
237
+ + den
238
+ + "/2/"
239
+ + sz
240
+ + "/"
241
+ + _DETECTOR
242
+ + ".zip"
243
+ )
244
+ seg_file_name.append(_REPO+ "/" + temp_name)
245
+
246
+ # Downloading the files
247
+ seg_dir = []
248
+ #seg_dir = dl_manager.download_and_extract(seg_file_name)
249
+
250
+ for url in seg_file_name:
251
+ try:
252
+ # Attempt to download the file
253
+ temp_down_file = []
254
+ temp_down_file = dl_manager.download_and_extract(url)
255
+ seg_dir.append(temp_down_file)
256
+
257
+ except Exception as e:
258
+ # If an exception occurs (e.g., file not found), log the error and add the URL to the failed_urls list
259
+ logger.error(f"Failed to download {url}: {e}")
260
+
261
+ return [
262
+ datasets.SplitGenerator(
263
+ name="segmentation-mask",
264
+ gen_kwargs={
265
+ "files": [data_dir_t for data_dir_t in seg_dir],
266
+ "name": "seg",
267
+ },
268
+ ),
269
+ ]
270
+
271
+ elif self.config.name == "metadata":
272
+ meta_dir = dl_manager.download_and_extract(_URLS['meta_data'])
273
+ return [
274
+ datasets.SplitGenerator(
275
+ name="metadata",
276
+ gen_kwargs={
277
+ "files": meta_dir,
278
+ "name": "info",
279
+ },
280
+ ),
281
+ ]
282
+
283
+
284
+ def get_all_file_paths(self, root_directory):
285
+ file_paths = [] # List to store file paths
286
+
287
+ # Walk through the directory and its subdirectories using os.walk
288
+ for folder, _, files in os.walk(root_directory):
289
+ for file in files:
290
+ if file.endswith('.raw'):
291
+ # Get the full path of the file
292
+ file_path = os.path.join(folder, file)
293
+ file_paths.append(file_path)
294
+ return file_paths
295
+
296
+ def get_support_file_path(self, raw_file_path, ext):
297
+ folder_path = os.path.dirname(raw_file_path)
298
+ # Use os.path.basename() to extract the filename
299
+ raw_file_name = os.path.basename(raw_file_path)
300
+ # Use os.path.splitext() to split the filename into root and extension
301
+ root, extension = os.path.splitext(raw_file_name)
302
+ if ext == "dcm":
303
+ supp_file_name = f"000.{ext}"
304
+ file_path = os.path.join(folder_path,"DICOM_dm",supp_file_name)
305
+ else:
306
+ supp_file_name = f"{root}.{ext}"
307
+ file_path = os.path.join(folder_path, supp_file_name)
308
+
309
+ if os.path.isfile(file_path):
310
+ return file_path
311
+ else:
312
+ return "Not available for this raw file"
313
+
314
+
315
+
316
+ def _generate_examples(self, files, name):
317
+ if self.config.name == "device-data":
318
+ key = 0
319
+ data_dir = []
320
+ for folder in files:
321
+ tmp_dir = []
322
+ tmp_dir = self.get_all_file_paths(os.path.join(folder, DATA_DIR[name]))
323
+ data_dir = data_dir + tmp_dir
324
+
325
+ for path in data_dir:
326
+ res_dic = {}
327
+ for word in _DENSITY:
328
+ if word in path:
329
+ breast_density = word
330
+ pattern = rf"(\d+\.\d+)_{word}"
331
+ match = re.search(pattern, path)
332
+ matched_text = match.group(1)
333
+ break
334
+
335
+ # Get image id to filter the respective row of the csv
336
+ image_id = os.path.basename(path)
337
+ # Use os.path.splitext() to split the filename into root and extension
338
+ root, extension = os.path.splitext(image_id)
339
+ # Get the extension without the dot
340
+ image_labels = extension.lstrip(".")
341
+ res_dic["Raw"] = path
342
+ res_dic["mhd"] = self.get_support_file_path(path, "mhd")
343
+ res_dic["loc"] = self.get_support_file_path(path, "loc")
344
+ if self.config.name == "device-data":
345
+ res_dic["dcm"] = self.get_support_file_path(path, "dcm")
346
+ res_dic["density"] = breast_density
347
+ res_dic["mass_radius"] = matched_text
348
+
349
+ yield key, res_dic
350
+ key += 1
351
+
352
+
353
+ if self.config.name == "segmentation-mask":
354
+ key = 0
355
+ data_dir = []
356
+ for folder in files:
357
+ tmp_dir = []
358
+ tmp_dir = self.get_all_file_paths(os.path.join(folder, DATA_DIR[name]))
359
+ data_dir = data_dir + tmp_dir
360
+
361
+ for path in data_dir:
362
+ res_dic = {}
363
+ for word in _DENSITY:
364
+ if word in path:
365
+ breast_density = word
366
+ pattern = rf"(\d+\.\d+)_{word}"
367
+ match = re.search(pattern, path)
368
+ matched_text = match.group(1)
369
+ break
370
+
371
+ # Get image id to filter the respective row of the csv
372
+ image_id = os.path.basename(path)
373
+ # Use os.path.splitext() to split the filename into root and extension
374
+ root, extension = os.path.splitext(image_id)
375
+ # Get the extension without the dot
376
+ image_labels = extension.lstrip(".")
377
+ res_dic["Raw"] = path
378
+ res_dic["mhd"] = self.get_support_file_path(path, "mhd")
379
+ res_dic["loc"] = self.get_support_file_path(path, "loc")
380
+ res_dic["density"] = breast_density
381
+ res_dic["mass_radius"] = matched_text
382
+
383
+ yield key, res_dic
384
+ key += 1
385
+
386
+ if self.config.name == "metadata":
387
+ key = 0
388
+ examples = list()
389
+ meta_dir = os.path.join(files, DATA_DIR[name])
390
+
391
+ res_dic = {
392
+ "fatty": os.path.join(meta_dir,'bounds_fatty.npy'),
393
+ "dense": os.path.join(meta_dir,'bounds_dense.npy'),
394
+ "hetero": os.path.join(meta_dir,'bounds_hetero.npy'),
395
+ "scattered": os.path.join(meta_dir,'bounds_scattered.npy')
396
+ }
397
+ yield key, res_dic
398
+ key +=1