Yuhan Hou committed on
Commit
26e1b4b
1 Parent(s): ddcbc76
Files changed (1) hide show
  1. FracAtlas_dataset.py +0 -227
FracAtlas_dataset.py DELETED
@@ -1,227 +0,0 @@
1
- #!/usr/bin/env python3
2
- # -*- coding: utf-8 -*-
3
- """
4
- Created on Sun Feb 18 23:13:51 2024
5
-
6
- @author: houyuhan
7
- """
8
-
9
- # @title Information
10
-
11
- #Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
12
- #
13
- # Licensed under the Apache License, Version 2.0 (the "License");
14
- # you may not use this file except in compliance with the License.
15
- # You may obtain a copy of the License at
16
- #
17
- # http://www.apache.org/licenses/LICENSE-2.0
18
- #
19
- # Unless required by applicable law or agreed to in writing, software
20
- # distributed under the License is distributed on an "AS IS" BASIS,
21
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22
- # See the License for the specific language governing permissions and
23
- # limitations under the License.
24
- # TODO: Address all TODOs and remove all explanatory comments
25
- """TODO: Add a description here."""
26
-
27
-
28
- import csv
29
- import json
30
- import os
31
- from typing import List
32
- import datasets
33
- import logging
34
- import pandas as pd
35
- from sklearn.model_selection import train_test_split
36
- import shutil
37
- import xml.etree.ElementTree as ET
38
-
39
-
40
- # TODO: Add BibTeX citation
41
- # Find for instance the citation on arxiv or on the dataset repo/website
42
- _CITATION = """\
43
- @InProceedings{huggingface:dataset,
44
- title = {FracAtlas: A Dataset for Fracture Classification, Localization and Segmentation of Musculoskeletal Radiographs},
45
- author={Abedeen, Iftekharul; Rahman, Md. Ashiqur; Zohra Prottyasha, Fatema; Ahmed, Tasnim; Mohmud Chowdhury, Tareque; Shatabda, Swakkhar},
46
- year={2023}
47
- }
48
- """
49
-
50
- # TODO: Add description of the dataset here
51
- # You can copy an official description
52
- _DESCRIPTION = """\
53
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
54
- """
55
-
56
- # TODO: Add a link to an official homepage for the dataset here
57
- _HOMEPAGE = "https://figshare.com/articles/dataset/The_dataset/22363012"
58
-
59
- # TODO: Add the licence for the dataset here if you can find it
60
- _LICENSE = "The dataset is licensed under a CC-BY 4.0 license."
61
-
62
- # TODO: Add link to the official dataset URLs here
63
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
64
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
65
- _URL = "https://figshare.com/ndownloader/files/43283628"
66
-
67
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
68
class FracAtlasDataset(datasets.GeneratorBasedBuilder):
    """HuggingFace dataset builder for the FracAtlas radiograph dataset.

    Downloads the FracAtlas archive, splits ``dataset.csv`` into train (70%),
    validation (15%) and test (15%) partitions, and yields one example per
    radiograph combining:

    * per-image labels from the CSV (body part, fracture/hardware/view flags),
    * COCO-style segmentation/bbox/area annotations (fractured images only),
    * image size metadata parsed from the PASCAL VOC XML files.
    """

    _URL = _URL
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return dataset metadata and the feature schema for one example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_id": datasets.Value("string"),
                    "image_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "hand": datasets.Value("int32"),
                    "leg": datasets.Value("int32"),
                    "hip": datasets.Value("int32"),
                    "shoulder": datasets.Value("int32"),
                    "mixed": datasets.Value("int32"),
                    "hardware": datasets.Value("int32"),
                    "multiscan": datasets.Value("int32"),
                    "fractured": datasets.Value("int32"),
                    "fracture_count": datasets.Value("int32"),
                    "frontal": datasets.Value("int32"),
                    "lateral": datasets.Value("int32"),
                    "oblique": datasets.Value("int32"),
                    "segmentation": datasets.Sequence(datasets.Sequence(datasets.Value("float"))),
                    "bbox": datasets.Sequence(datasets.Value("float")),
                    "area": datasets.Value("float"),
                    "width": datasets.Value("int32"),
                    "height": datasets.Value("int32"),
                    "depth": datasets.Value("int32"),
                    "segmented": datasets.Value("int32"),
                }
            ),
            # No default supervised keys: downstream users pick their own target.
            supervised_keys=None,
            # Surface the module-level metadata (was defined but left unused).
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the archive and materialize reproducible train/val/test CSVs."""
        url_to_download = self._URL
        downloaded_files = dl_manager.download_and_extract(url_to_download)

        # The archive unpacks into a top-level 'FracAtlas' directory.
        base_path = os.path.join(downloaded_files, 'FracAtlas')

        df = pd.read_csv(os.path.join(base_path, 'dataset.csv'))
        # Fixed random_state so the 70/15/15 partition is reproducible across
        # runs (the original unseeded split changed on every load).
        train_df, test_df = train_test_split(df, test_size=0.3, random_state=42)
        validation_df, test_df = train_test_split(test_df, test_size=0.5, random_state=42)

        train_df.to_csv(os.path.join(base_path, 'train_dataset.csv'), index=False)
        validation_df.to_csv(os.path.join(base_path, 'validation_dataset.csv'), index=False)
        test_df.to_csv(os.path.join(base_path, 'test_dataset.csv'), index=False)

        annotations_path = os.path.join(base_path, 'Annotations/COCO JSON/COCO_fracture_masks.json')
        images_path = os.path.join(base_path, 'images')
        localization_path = os.path.join(base_path, 'Annotations/PASCAL VOC')

        def _make_split(name, csv_name):
            # All splits share the same annotation sources; only the CSV differs.
            return datasets.SplitGenerator(
                name=name,
                gen_kwargs={
                    "dataset_csv_path": os.path.join(base_path, csv_name),
                    "images_path": images_path,
                    "annotations_path": annotations_path,
                    "localization_path": localization_path,
                },
            )

        return [
            _make_split(datasets.Split.TRAIN, 'train_dataset.csv'),
            _make_split(datasets.Split.VALIDATION, 'validation_dataset.csv'),
            _make_split(datasets.Split.TEST, 'test_dataset.csv'),
        ]

    def _generate_examples(self, annotations_path, images_path, dataset_csv_path, localization_path):
        """Yield ``(image_id, example)`` pairs for one split.

        Args:
            annotations_path: COCO JSON file with segmentation/bbox/area entries.
            images_path: Root of the 'Fractured'/'Non_fractured' image folders.
            dataset_csv_path: Per-split CSV written by ``_split_generators``.
            localization_path: Directory of PASCAL VOC XML files, one per image.
        """
        logging.info("Generating examples from = %s", dataset_csv_path)
        split_df = pd.read_csv(dataset_csv_path)  # rows for the current split

        def format_image_id(numeric_id):
            # COCO annotations store a bare integer id; the CSV and the files
            # on disk use names like 'IMG0000123.jpg'.
            return f"IMG{numeric_id:07d}.jpg"

        def parse_xml(xml_path):
            # Extract image geometry and the 'segmented' flag from the VOC file.
            root = ET.parse(xml_path).getroot()
            width = int(root.find("./size/width").text)
            height = int(root.find("./size/height").text)
            depth = int(root.find("./size/depth").text)
            segmented = int(root.find("./segmented").text)
            return width, height, depth, segmented

        with open(annotations_path, encoding="utf-8") as file:
            annotations_json = json.load(file)

        # Re-key COCO annotations by file name so they line up with the CSV ids.
        annotations = {
            format_image_id(item['image_id']): item
            for item in annotations_json['annotations']
        }

        for _, row in split_df.iterrows():
            image_id = row['image_id']
            # Fractured and non-fractured images live in separate sub-folders.
            folder = 'Fractured' if row['fractured'] == 1 else 'Non_fractured'
            image_path = os.path.join(images_path, folder, image_id)

            annotation = annotations.get(image_id)
            if annotation:
                # 'or' guards against keys that are present but null in the
                # JSON — the original code could propagate None into features
                # declared as sequences/floats.
                segmentation = annotation.get('segmentation') or [[0.0]]
                bbox = annotation.get('bbox') or [0.0]
                area = annotation.get('area') or 0.0
            else:
                # Non-fractured images have no COCO entry; use neutral defaults.
                segmentation, bbox, area = [[0.0]], [0.0], 0.0

            xml_file_name = f"{os.path.splitext(image_id)[0]}.xml"
            width, height, depth, segmented = parse_xml(
                os.path.join(localization_path, xml_file_name)
            )

            yield image_id, {
                "image_id": image_id,
                "image_path": image_path,
                # datasets.Image() accepts a path and decodes lazily.
                "image": image_path,
                "hand": row["hand"],
                "leg": row["leg"],
                "hip": row["hip"],
                "shoulder": row["shoulder"],
                "mixed": row["mixed"],
                "hardware": row["hardware"],
                "multiscan": row["multiscan"],
                "fractured": row["fractured"],
                "fracture_count": row["fracture_count"],
                "frontal": row["frontal"],
                "lateral": row["lateral"],
                "oblique": row["oblique"],
                "segmentation": segmentation,
                "bbox": bbox,
                "area": area,
                "width": width,
                "height": height,
                "depth": depth,
                "segmented": segmented,
            }