|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json |
|
import os |
|
|
|
import datasets |
|
import h5py |
|
import numpy as np |
|
import pandas as pd |
|
|
|
|
|
# BibTeX entry to cite when using QuakeSet (arXiv:2403.18116).
_CITATION = """
@misc{cambrin2024quakeset,
      title={QuakeSet: A Dataset and Low-Resource Models to Monitor Earthquakes through Sentinel-1},
      author={Daniele Rege Cambrin and Paolo Garza},
      year={2024},
      eprint={2403.18116},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
"""

# Short human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
QuakeSet is a dataset of earthquake images from the Copernicus Sentinel-1 satellites.
It contains images from before, after an earthquake, and a sample before the "before" sample.
Ground truth contains magnitudes and locations of earthquakes provided by ISC.
"""

# Canonical home of the dataset on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/DarthReca/quakeset"

# SPDX-style license identifier for the released data.
_LICENSE = "OPENRAIL"

# Files downloaded by dl_manager; a single HDF5 archive holding all splits.
_URLS = ["earthquakes.h5"]
|
|
|
|
|
class QuakeSet(datasets.GeneratorBasedBuilder):
    """QuakeSet: Sentinel-1 SAR imagery of earthquake-affected areas.

    Each example pairs two 2-channel SAR acquisitions stacked into a
    (4, 512, 512) float32 array with a binary ``affected`` label, the
    earthquake ``magnitude`` and ``hypocenter`` (from ISC), and the
    patch's geographic ``x``/``y`` coordinate vectors in CRS ``epsg``.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="Default configuration",
        )
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the DatasetInfo describing the feature schema."""
        # Only one configuration exists. Fail loudly on an unexpected name
        # instead of hitting a NameError on an unbound `features` below.
        if self.config.name != "default":
            raise ValueError(f"Unsupported configuration: {self.config.name}")
        features = datasets.Features(
            {
                "sample_key": datasets.Value("string"),
                # Pre- and post-event acquisitions (2 channels each),
                # concatenated along the channel axis.
                "pre_post_image": datasets.Array3D(
                    shape=(4, 512, 512), dtype="float32"
                ),
                # 1 if the patch pairs pre/post an earthquake, 0 for the
                # unaffected before/pre control pair.
                "affected": datasets.ClassLabel(num_classes=2),
                "magnitude": datasets.Value("float32"),
                # (latitude, longitude, depth) — TODO confirm ordering
                # against the HDF5 attributes; stored as-is from the file.
                "hypocenter": datasets.Sequence(
                    datasets.Value("float32"), length=3
                ),
                "epsg": datasets.Value("int32"),
                # Per-column / per-row coordinates of the 512x512 patch.
                "x": datasets.Sequence(datasets.Value("float32"), length=512),
                "y": datasets.Sequence(datasets.Value("float32"), length=512),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the HDF5 archive and declare the three splits.

        All splits live in the same file; `_generate_examples` filters by
        the per-resource ``split`` attribute.
        """
        files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": files,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": files,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": files,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(sample_key, example)`` pairs for the requested split.

        The HDF5 layout is ``<resource_id>/<patch_id>/{before,pre,post}``
        plus per-resource coordinate datasets ``<resource_id>/{x,y}``.

        Args:
            filepath: list with the downloaded HDF5 file path at index 0.
            split: one of ``"train"``, ``"validation"``, ``"test"``.
        """
        sample_ids = []
        with h5py.File(filepath[0]) as f:
            for key, patches in f.items():
                attributes = dict(f[key].attrs)
                if attributes["split"] != split:
                    continue
                # Positive samples (pre/post pair) for every patch.
                sample_ids += [(f"{key}/{p}", 1, attributes) for p in patches.keys()]
                # Negative samples (before/pre pair) only where an extra
                # "before" acquisition exists.
                sample_ids += [
                    (f"{key}/{p}", 0, attributes)
                    for p, v in patches.items()
                    if "before" in v
                ]

            for sample_id, label, attributes in sample_ids:
                resource_id, _, patch_id = sample_id.partition("/")
                # Skip the coordinate datasets stored alongside the patches.
                # Match the member name exactly: a substring test such as
                # `"x" in sample_id` would wrongly drop every patch of any
                # resource whose id contains the letters "x" or "y".
                if patch_id in ("x", "y"):
                    continue

                # Label 1 pairs the pre/post acquisitions; label 0 pairs the
                # earlier "before" acquisition with "pre" as a negative.
                pre_key = "pre" if label == 1 else "before"
                post_key = "post" if label == 1 else "pre"
                pre_sample = f[sample_id][pre_key][...]
                post_sample = f[sample_id][post_key][...]
                # HWC -> CHW, with NaNs (no-data pixels) zeroed out.
                pre_sample = np.nan_to_num(pre_sample, nan=0).transpose(2, 0, 1)
                post_sample = np.nan_to_num(post_sample, nan=0).transpose(2, 0, 1)
                sample = np.concatenate(
                    [pre_sample, post_sample], axis=0, dtype=np.float32
                )
                sample_key = f"{sample_id}/{post_key}"
                item = {
                    "sample_key": sample_key,
                    "pre_post_image": sample,
                    "epsg": attributes["epsg"],
                }

                # Patches tile the resource in row-major order; recover the
                # (column, row) of this 512x512 tile from its numeric suffix.
                x = f[resource_id]["x"][...]
                y = f[resource_id]["y"][...]
                x_start = int(patch_id.split("_")[1]) % (x.shape[0] // 512)
                y_start = int(patch_id.split("_")[1]) // (x.shape[0] // 512)
                x = x[x_start * 512 : (x_start + 1) * 512]
                y = y[y_start * 512 : (y_start + 1) * 512]
                item |= {
                    "affected": label,
                    "magnitude": np.float32(attributes["magnitude"]),
                    "hypocenter": attributes["hypocenter"],
                    "x": x.flatten(),
                    "y": y.flatten(),
                }

                yield sample_key, item
|
|