xiazeyu committed
Commit b811f30
Parent: c92dfb4

Add conversion script

Files changed (1)
script.py +195 -0
script.py ADDED
@@ -0,0 +1,195 @@
+ import json
+
+ import datasets
+ import pandas as pd
+
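+ # Map each sample id to the original electron-microscopy image file name.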
+ id_to_original = {
+     "1": "5-5-10-H-A1000C 100h-30k-3-crop",
+     "2": "5-5-A1000C 100h-30k-9 crop",
+     "3": "5-5-A1000C 100h-30k-9 crop2",
+     "4": "5-5-A1000C 100h-30k-9-crop",
+     "5": "5k-Cr-10-10-20Fe-H-Ageing1200C 4h-6-crop",
+     "6": "Cr-5-5-10Fe-A1200C 4h-6 crop1",
+     "7": "Cr-5-5-10Fe-A1200C 4h-6 crop2",
+     "8": "Cr-5-5-10Fe-H1400-20h-A800-240h-80k-9crop1",
+     "9": "Cr-5-5-10Fe-H1400-20h-A800-240h-80k-9crop2",
+     "10": "Cr-5-5-10Fe-H1400-20h-A800-240h-80k-10 crop",
+     "11": "Cr-5-5-10Fe-H1400-20h-A800-240h-80k-10 crop2",
+     "12": "Cr-5-5-10Fe-H1400-20h-A1000-20h-50k-10 crop",
+     "13": "Cr-5-5-10Fe-H1400-20h-A1000-240h-30k-8 crop2",
+     "14": "Cr-5-5-A1200C 4h-20k-5-crop1",
+     "15": "Cr-5-5-A1200C 4h-20k-5-crop2",
+     "16": "Cr-10-10-20Fe-H20h-A1200C 20h-7-crop1",
+     "17": "J955-H2-7-crop1",
+     "18": "J955-H2-7-crop2",
+     "19": "Cr-10-10-20Fe-A100h-1-crop1",
+     "20": "Cr-10-10-20Fe-A100h-4-crop1",
+     "21": "Cr-10Ni-10Al-20Fe-8 crop1",
+     "22": "Cr-10Ni-10Al-20Fe-8 crop2",
+     "23": "Cr-10Ni-10Al-20Fe-H1400C20h-9 crop1",
+     "24": "Cr-10Ni-10Al-20Fe-H1400C20h-9 crop2",
+ }
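+
+ # Fixed split assignment for the 24 samples: 15 train, 4 validation, 5 test.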
+ ids_split = {
+     datasets.Split.TEST: [
+         "1",
+         "5",
+         "9",
+         "14",
+         "20",
+     ],
+     datasets.Split.VALIDATION: [
+         "2",
+         "7",
+         "18",
+         "22",
+     ],
+     datasets.Split.TRAIN: [
+         "3",
+         "4",
+         "6",
+         "8",
+         "10",
+         "11",
+         "12",
+         "13",
+         "15",
+         "16",
+         "17",
+         "19",
+         "21",
+         "23",
+         "24",
+     ],
+ }
+
+ _CITATION = """\
+ @article{xia2023Accurate,
+     author = {Zeyu Xia and Kan Ma and Sibo Cheng and Thomas Blackburn and Ziling Peng and Kewei Zhu and Weihang Zhang and Dunhui Xiao and Alexander J Knowles and Rossella Arcucci},
+     copyright = {CC BY-NC 3.0},
+     doi = {10.1039/d3cp00402c},
+     issn = {1463-9076},
+     journal = {Physical Chemistry Chemical Physics},
+     language = {English},
+     month = {6},
+     number = {23},
+     pages = {15970--15987},
+     pmid = {37265373},
+     publisher = {Royal Society of Chemistry (RSC)},
+     title = {Accurate Identification and Measurement of the Precipitate Area by Two-Stage Deep Neural Networks in Novel Chromium-Based Alloy},
+     url = {https://doi.org/10.1039/d3cp00402c},
+     volume = {25},
+     year = {2023}
+ }
+ """
+
+ _DESCRIPTION = "A comprehensive, two-tiered deep learning approach designed for precise object detection and segmentation in electron microscopy (EM) images."
+
+ _CATEGORIES = ["precipitate"]
+
+ _HOMEPAGE = "https://github.com/xiazeyu/DT_SegNet"
+
+ _LICENSE = "CC BY-NC 3.0"
+
+
+ def convert_image(image_path):
+     """Read an image file and return its raw bytes."""
+     with open(image_path, "rb") as image_file:
+         return image_file.read()
+
+
+ def convert_json(json_path):
+     """Load a JSON annotation file and return it re-serialized as a string."""
+     with open(json_path, "r") as json_file:
+         return json.dumps(json.load(json_file))
+
+
+ def convert_txt(txt_path):
+     """Parse a YOLO-format label file into bounding-box and category lists."""
+     yolo_data = {"bbox": [], "category": []}
+
+     with open(txt_path, "r") as file:
+         for line in file:
+             # Each line holds a category id followed by four box coordinates.
+             parts = line.strip().split()
+
+             # The first field is the integer category id.
+             yolo_data["category"].append(int(parts[0]))
+
+             # The remaining fields are the bounding-box coordinates as floats.
+             yolo_data["bbox"].append([float(coord) for coord in parts[1:]])
+
+     return yolo_data
+
+
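+ # Each record pairs the EM image with three annotation forms: YOLO detection
+ # boxes, a segmentation mask image, and the raw per-image JSON annotation.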
+ def get_ds(pfx):
+     """Build the `datasets.Dataset` for one split, reading files from a
+     directory named after the split (e.g. train/1.png)."""
+     image_array = []
+     seg_annotation_array = []
+     raw_seg_annotation_array = []
+     det_annotation_array = []
+
+     for img_idx in ids_split[pfx]:
+         # YOLO-format detection labels.
+         ydt = convert_txt(f"{pfx}/{img_idx}_label.txt")
+         det_annotation_array.append({
+             "bbox": ydt["bbox"],
+             "category": ydt["category"],
+         })
+         # EM image, segmentation mask, and raw JSON annotation.
+         image_array.append(convert_image(f"{pfx}/{img_idx}.png"))
+         seg_annotation_array.append(convert_image(f"{pfx}/{img_idx}_label.png"))
+         raw_seg_annotation_array.append(convert_json(f"{pfx}/{img_idx}.json"))
+
+     data = {
+         "id": ids_split[pfx],
+         "original_name": [id_to_original[i] for i in ids_split[pfx]],
+         "image": image_array,
+         "det_annotation": det_annotation_array,
+         "seg_annotation": seg_annotation_array,
+         "raw_seg_annotation": raw_seg_annotation_array,
+     }
+
+     df = pd.DataFrame(data)
+
+     features = datasets.Features({
+         "id": datasets.Value("int8"),
+         "original_name": datasets.Value("string"),
+         "image": datasets.Image(),
+         "det_annotation": datasets.Sequence(
+             {
+                 "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+                 "category": datasets.ClassLabel(num_classes=1, names=_CATEGORIES),
+             }
+         ),
+         "seg_annotation": datasets.Image(),
+         "raw_seg_annotation": datasets.Value(dtype="string"),
+     })
+
+     data_info = datasets.DatasetInfo(
+         description=_DESCRIPTION,
+         features=features,
+         homepage=_HOMEPAGE,
+         license=_LICENSE,
+         citation=_CITATION,
+     )
+
+     ds = datasets.Dataset.from_pandas(df,
+                                       features=features,
+                                       info=data_info,
+                                       split=pfx)
+
+     ds.VERSION = datasets.Version("1.0.0")
+
+     return ds
+
+
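+ # Convert all three splits and bundle them into a single DatasetDict.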
+ ddd = datasets.DatasetDict(
+     {
+         str(datasets.Split.TRAIN): get_ds(datasets.Split.TRAIN),
+         str(datasets.Split.VALIDATION): get_ds(datasets.Split.VALIDATION),
+         str(datasets.Split.TEST): get_ds(datasets.Split.TEST),
+     }
+ )
+
+ # ddd.save_to_disk('data/')
+ # ddd.push_to_hub('xiazeyu/DT_SegNet')
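+
+ # A minimal sketch of loading the published dataset afterwards (assumes the
+ # push_to_hub call above has been run):
+ #
+ #   ds = datasets.load_dataset("xiazeyu/DT_SegNet")
+ #   print(ds["train"][0]["original_name"])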