Yuhan Hou commited on
Commit
cd241bf
1 Parent(s): 26e1b4b
Files changed (1) hide show
  1. FracAtlas_dataset.py +242 -0
FracAtlas_dataset.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 18 23:13:51 2024

@author: houyuhan
"""


# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FracAtlas Dataset Loader

This script provides a Hugging Face `datasets` loader for the FracAtlas dataset, a comprehensive collection
of musculoskeletal radiographs aimed at advancing research in fracture classification, localization, and segmentation.
The dataset includes high-quality X-Ray images accompanied by detailed annotations in COCO JSON format for segmentation
and bounding box information, as well as PASCAL VOC XML files for additional localization data.

The loader handles downloading and preparing the dataset, making it readily available for machine learning models and analysis
tasks in medical imaging, especially focusing on the detection and understanding of bone fractures.

License: CC-BY 4.0
"""

# Standard-library imports.
import csv
import json
import logging
import os
import shutil
import xml.etree.ElementTree as ET
from typing import List

# Third-party imports.
import datasets
import pandas as pd
from sklearn.model_selection import train_test_split

# BibTeX citation for the FracAtlas paper (Abedeen et al., 2023).
_CITATION = """\
@InProceedings{huggingface:yh0701/FracAtlas_dataset,
title = {FracAtlas: A Dataset for Fracture Classification, Localization and Segmentation of Musculoskeletal Radiographs},
author={Abedeen, Iftekharul; Rahman, Md. Ashiqur; Zohra Prottyasha, Fatema; Ahmed, Tasnim; Mohmud Chowdhury, Tareque; Shatabda, Swakkhar},
year={2023}
}
"""

# Human-readable summary shown in the dataset card / `DatasetInfo`.
_DESCRIPTION = """\
The "FracAtlas" dataset is a collection of musculoskeletal radiographs for fracture classification, localization, and segmentation.
It includes 4,083 X-Ray images with annotations in multiple formats.The annotations include bbox, segmentations, and etc.
The dataset is intended for use in deep learning tasks in medical imaging, specifically targeting the understanding of bone fractures.
It is freely available under a CC-BY 4.0 license.
"""

# Official dataset homepage (figshare record).
_HOMEPAGE = "https://figshare.com/articles/dataset/The_dataset/22363012"

_LICENSE = "The dataset is licensed under a CC-BY 4.0 license."

# Direct download URL for the dataset archive; the Hugging Face Datasets
# library does not host the data, it only points to the original files.
_URL = "https://figshare.com/ndownloader/files/43283628"
class FracAtlasDataset(datasets.GeneratorBasedBuilder):
    """Builder for the FracAtlas musculoskeletal radiograph dataset.

    Downloads and extracts the FracAtlas archive, splits the master
    ``dataset.csv`` into train/validation/test (70/15/15), and yields one
    example per X-Ray image, combining the per-image CSV metadata, the COCO
    JSON fracture annotations, and the PASCAL VOC XML size/segmentation info.
    """

    _URL = _URL
    VERSION = datasets.Version("1.1.0")
    # Fixed seed so the 70/15/15 split is reproducible across runs; without it
    # every load would produce a different partition.
    _SPLIT_SEED = 42

    def _info(self):
        """Declare the feature schema exposed for every example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_id": datasets.Value("string"),
                    "image_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    # Body-part / study flags taken from dataset.csv.
                    "hand": datasets.Value("int32"),
                    "leg": datasets.Value("int32"),
                    "hip": datasets.Value("int32"),
                    "shoulder": datasets.Value("int32"),
                    "mixed": datasets.Value("int32"),
                    "hardware": datasets.Value("int32"),
                    "multiscan": datasets.Value("int32"),
                    "fractured": datasets.Value("int32"),
                    "fracture_count": datasets.Value("int32"),
                    # Radiograph view flags.
                    "frontal": datasets.Value("int32"),
                    "lateral": datasets.Value("int32"),
                    "oblique": datasets.Value("int32"),
                    # COCO-style annotation fields; placeholder defaults are
                    # substituted when an image has no fracture annotation.
                    "segmentation": datasets.Sequence(datasets.Sequence(datasets.Value("float"))),
                    "bbox": datasets.Sequence(datasets.Value("float")),
                    "area": datasets.Value("float"),
                    # Image geometry parsed from the PASCAL VOC XML.
                    "width": datasets.Value("int32"),
                    "height": datasets.Value("int32"),
                    "depth": datasets.Value("int32"),
                    "segmented": datasets.Value("int32"),
                }
            ),
            # No default supervised pairing; consumers choose their own targets.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download/extract the archive and build train/validation/test splits.

        The master CSV is partitioned 70/15/15 with a fixed ``random_state``
        (bugfix: the original call omitted it, making splits non-deterministic),
        and each partition is written back as a CSV for ``_generate_examples``.
        """
        downloaded_files = dl_manager.download_and_extract(self._URL)

        # The archive extracts into a 'FracAtlas' directory.
        base_path = os.path.join(downloaded_files, 'FracAtlas')

        # Split the dataset to train/test/validation by 0.7/0.15/0.15.
        df = pd.read_csv(os.path.join(base_path, 'dataset.csv'))
        train_df, holdout_df = train_test_split(df, test_size=0.3, random_state=self._SPLIT_SEED)
        validation_df, test_df = train_test_split(holdout_df, test_size=0.5, random_state=self._SPLIT_SEED)

        # Persist each split so _generate_examples can re-read it lazily.
        split_csv_paths = {}
        for split_name, split_df in (
            ("train", train_df),
            ("validation", validation_df),
            ("test", test_df),
        ):
            csv_path = os.path.join(base_path, f"{split_name}_dataset.csv")
            split_df.to_csv(csv_path, index=False)
            split_csv_paths[split_name] = csv_path

        annotations_path = os.path.join(base_path, 'Annotations/COCO JSON/COCO_fracture_masks.json')
        images_path = os.path.join(base_path, 'images')
        localization_path = os.path.join(base_path, 'Annotations/PASCAL VOC')

        # All three splits share the same image/annotation locations.
        common_kwargs = {
            "images_path": images_path,
            "annotations_path": annotations_path,
            "localization_path": localization_path,
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"dataset_csv_path": split_csv_paths["train"], **common_kwargs},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"dataset_csv_path": split_csv_paths["validation"], **common_kwargs},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"dataset_csv_path": split_csv_paths["test"], **common_kwargs},
            ),
        ]

    def _generate_examples(self, annotations_path, images_path, dataset_csv_path, localization_path):
        """Yield ``(image_id, example)`` pairs for one split.

        Args:
            annotations_path: COCO JSON file with fracture segmentations/bboxes.
            images_path: Root of the 'Fractured'/'Non_fractured' image folders.
            dataset_csv_path: Per-split CSV written by ``_split_generators``.
            localization_path: Directory of PASCAL VOC XML files, one per image.
        """
        logging.info("Generating examples from = %s", dataset_csv_path)
        split_df = pd.read_csv(dataset_csv_path)  # Rows for the current split only

        def format_image_id(numeric_id):
            # COCO annotations store numeric ids; the CSV keys images as
            # e.g. 'IMG0000123.jpg', so normalize to that form.
            return f"IMG{numeric_id:07d}.jpg"

        def parse_xml(xml_path):
            """Return (width, height, depth, segmented) from a PASCAL VOC file."""
            root = ET.parse(xml_path).getroot()
            width = int(root.find("./size/width").text)
            height = int(root.find("./size/height").text)
            depth = int(root.find("./size/depth").text)
            segmented = int(root.find("./segmented").text)
            return width, height, depth, segmented

        # Load the COCO annotations and index them by formatted image id.
        with open(annotations_path) as file:
            annotations_json = json.load(file)
        for item in annotations_json['annotations']:
            item['image_id'] = format_image_id(item['image_id'])
        annotations = {item['image_id']: item for item in annotations_json['annotations']}

        for _, row in split_df.iterrows():
            image_id = row['image_id']
            # Fractured and non-fractured images live in separate folders.
            folder = 'Fractured' if row['fractured'] == 1 else 'Non_fractured'
            image_path = os.path.join(images_path, folder, image_id)

            annotation = annotations.get(image_id)
            if annotation:
                # BUGFIX: fall back to placeholder defaults even when the
                # annotation record exists but lacks a key — the original
                # passed None through, which breaks the declared
                # Sequence(float) feature encoding.
                segmentation = annotation.get('segmentation') or [[0.0]]
                bbox = annotation.get('bbox') or [0.0]
                area = annotation.get('area') or 0.0
            else:
                # No annotation at all for this image (e.g. non-fractured).
                segmentation, bbox, area = [[0.0]], [0.0], 0.0

            # The VOC file shares the image's base name with an .xml suffix.
            xml_file_name = f"{os.path.splitext(image_id)[0]}.xml"
            width, height, depth, segmented = parse_xml(
                os.path.join(localization_path, xml_file_name)
            )

            yield image_id, {
                "image_id": image_id,
                "image_path": image_path,
                "image": image_path,
                "hand": row["hand"],
                "leg": row["leg"],
                "hip": row["hip"],
                "shoulder": row["shoulder"],
                "mixed": row["mixed"],
                "hardware": row["hardware"],
                "multiscan": row["multiscan"],
                "fractured": row["fractured"],
                "fracture_count": row["fracture_count"],
                "frontal": row["frontal"],
                "lateral": row["lateral"],
                "oblique": row["oblique"],
                "segmentation": segmentation,
                "bbox": bbox,
                "area": area,
                "width": width,
                "height": height,
                "depth": depth,
                "segmented": segmented,
            }