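"""Build the DT_SegNet dataset as a Hugging Face `datasets.DatasetDict`.

For each of the 24 electron-microscopy images, this script bundles the input
PNG, its segmentation mask, the raw JSON annotation, and the YOLO-format
detection labels into train/validation/test splits.
Source: https://github.com/xiazeyu/DT_SegNet.
"""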
import json
import datasets
import pandas as pd

# Map dataset ids to the original micrograph file names.
id_to_original = {
    "1": "5-5-10-H-A1000C 100h-30k-3-crop",
    "2": "5-5-A1000C 100h-30k-9 crop",
    "3": "5-5-A1000C 100h-30k-9 crop2",
    "4": "5-5-A1000C 100h-30k-9-crop",
    "5": "5k-Cr-10-10-20Fe-H-Ageing1200C 4h-6-crop",
    "6": "Cr-5-5-10Fe-A1200C 4h-6 crop1",
    "7": "Cr-5-5-10Fe-A1200C 4h-6 crop2",
    "8": "Cr-5-5-10Fe-H1400-20h-A800-240h-80k-9crop1",
    "9": "Cr-5-5-10Fe-H1400-20h-A800-240h-80k-9crop2",
    "10": "Cr-5-5-10Fe-H1400-20h-A800-240h-80k-10 crop",
    "11": "Cr-5-5-10Fe-H1400-20h-A800-240h-80k-10 crop2",
    "12": "Cr-5-5-10Fe-H1400-20h-A1000-20h-50k-10 crop",
    "13": "Cr-5-5-10Fe-H1400-20h-A1000-240h-30k-8 crop2",
    "14": "Cr-5-5-A1200C 4h-20k-5-crop1",
    "15": "Cr-5-5-A1200C 4h-20k-5-crop2",
    "16": "Cr-10-10-20Fe-H20h-A1200C 20h-7-crop1",
    "17": "J955-H2-7-crop1",
    "18": "J955-H2-7-crop2",
    "19": "Cr-10-10-20Fe-A100h-1-crop1",
    "20": "Cr-10-10-20Fe-A100h-4-crop1",
    "21": "Cr-10Ni-10Al-20Fe-8 crop1",
    "22": "Cr-10Ni-10Al-20Fe-8 crop2",
    "23": "Cr-10Ni-10Al-20Fe-H1400C20h-9 crop1",
    "24": "Cr-10Ni-10Al-20Fe-H1400C20h-9 crop2",
}

ids_split = {
    datasets.Split.TEST: [
        "1",
        "5",
        "9",
        "14",
        "20",
    ],
    datasets.Split.VALIDATION: [
        "2",
        "7",
        "18",
        "22",
    ],
    datasets.Split.TRAIN: [
        "3",
        "4",
        "6",
        "8",
        "10",
        "11",
        "12",
        "13",
        "15",
        "16",
        "17",
        "19",
        "21",
        "23",
        "24",
    ],
}
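# 24 images in total: 15 train / 4 validation / 5 test.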
_CITATION = """\
@article{xia2023Accurate,
author = {Zeyu Xia and Kan Ma and Sibo Cheng and Thomas Blackburn and Ziling Peng and Kewei Zhu and Weihang Zhang and Dunhui Xiao and Alexander J Knowles and Rossella Arcucci},
copyright = {CC BY-NC 3.0},
doi = {10.1039/d3cp00402c},
issn = {1463-9076},
journal = {Physical Chemistry Chemical Physics},
keywords = {},
language = {English},
month = {6},
number = {23},
pages = {15970--15987},
pmid = {37265373},
publisher = {Royal Society of Chemistry (RSC)},
title = {Accurate Identification and Measurement of the Precipitate Area by Two-Stage Deep Neural Networks in Novel Chromium-Based Alloy},
url = {https://doi.org/10.1039/d3cp00402c},
volume = {25},
year = {2023}
}
"""
_DESCRIPTION = 'A comprehensive, two-tiered deep learning approach designed for precise object detection and segmentation in electron microscopy (EM) images.'
_CATEGORIES = ["precipitate"]
_HOMEPAGE = 'https://github.com/xiazeyu/DT_SegNet'
_LICENSE = 'CC BY-NC 3.0'


def convert_image(image_path):
    """Read an image file and return its raw bytes (accepted by datasets.Image)."""
    with open(image_path, "rb") as image_file:
        return image_file.read()


def convert_json(json_path):
    """Load a JSON annotation file and return it re-serialized as a string."""
    with open(json_path, "r") as json_file:
        return json.dumps(json.load(json_file))


def convert_txt(txt_path):
    """Parse a YOLO-format label file into {"bbox": [...], "category": [...]}."""
    yolo_data = {"bbox": [], "category": []}
    with open(txt_path, "r") as file:
        for line in file:
            parts = line.strip().split()
            # The first field is the integer class id.
            yolo_data["category"].append(int(parts[0]))
            # The remaining fields are the normalized bounding-box
            # coordinates (x_center, y_center, width, height).
            bbox = [float(coord) for coord in parts[1:]]
            yolo_data["bbox"].append(bbox)
    return yolo_data
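
# For example, a (hypothetical) label line "0 0.512 0.431 0.060 0.052" parses
# to category 0 ("precipitate") with bbox [0.512, 0.431, 0.060, 0.052].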


def get_ds(pfx):
    """Build a datasets.Dataset for one split from the files under `{pfx}/`."""
    image_array = []
    seg_annotation_array = []
    raw_seg_annotation_array = []
    det_annotation_array = []
    for img_idx in ids_split[pfx]:
        # Detection labels: one YOLO-format text file per image.
        ydt = convert_txt(f"{pfx}/{img_idx}_label.txt")
        det_annotation_array.append({
            "bbox": ydt["bbox"],
            "category": ydt["category"],
        })
        # Input image, segmentation mask, and raw segmentation annotation.
        image_array.append(convert_image(f"{pfx}/{img_idx}.png"))
        seg_annotation_array.append(convert_image(f"{pfx}/{img_idx}_label.png"))
        raw_seg_annotation_array.append(convert_json(f"{pfx}/{img_idx}.json"))
    data = {
        "id": ids_split[pfx],
        "original_name": [id_to_original[img_id] for img_id in ids_split[pfx]],
        "image": image_array,
        "det_annotation": det_annotation_array,
        "seg_annotation": seg_annotation_array,
        "raw_seg_annotation": raw_seg_annotation_array,
    }
    df = pd.DataFrame(data)
    features = datasets.Features({
        # The ids are strings ("1".."24"); the schema casts them to int8.
        'id': datasets.Value('int8'),
        'original_name': datasets.Value('string'),
        'image': datasets.Image(),
        "det_annotation": datasets.Sequence(
            {
                "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                "category": datasets.ClassLabel(num_classes=1, names=_CATEGORIES),
            }
        ),
        'seg_annotation': datasets.Image(),
        'raw_seg_annotation': datasets.Value(dtype='string'),
    })
    data_info = datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=features,
        homepage=_HOMEPAGE,
        license=_LICENSE,
        citation=_CITATION,
    )
    ds = datasets.Dataset.from_pandas(df,
                                      features=features,
                                      info=data_info,
                                      split=pfx)
    ds.VERSION = datasets.Version("1.0.0")
    return ds


ddd = datasets.DatasetDict(
    {
        str(datasets.Split.TRAIN): get_ds(datasets.Split.TRAIN),
        str(datasets.Split.VALIDATION): get_ds(datasets.Split.VALIDATION),
        str(datasets.Split.TEST): get_ds(datasets.Split.TEST),
    }
)

# Uncomment to save the dataset locally or push it to the Hugging Face Hub:
# ddd.save_to_disk('data/')
# ddd.push_to_hub('xiazeyu/DT_SegNet')
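
# A minimal loading sketch, assuming the dataset has been pushed to the Hub
# under the repo id above:
#
#   from datasets import load_dataset
#   ds = load_dataset("xiazeyu/DT_SegNet")
#   print(ds["train"][0]["original_name"])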