Rhodes committed
Commit: 5f6d4ef
Parent(s): e8836b8
:hammer: Added citation
quakeset.py: +25 -52

quakeset.py CHANGED
@@ -22,8 +22,15 @@ import numpy as np
 import pandas as pd
 
 # Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """
-
+_CITATION = """
+@misc{cambrin2024quakeset,
+      title={QuakeSet: A Dataset and Low-Resource Models to Monitor Earthquakes through Sentinel-1},
+      author={Daniele Rege Cambrin and Paolo Garza},
+      year={2024},
+      eprint={2403.18116},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV}
+}
 """
 
 # You can copy an official description
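With the BibTeX entry filled in, the citation travels with the dataset's metadata and can be read back from the builder. A minimal sketch, assuming a placeholder repository id (the actual repo id is not shown on this page):

from datasets import load_dataset_builder

# "user/quakeset" is a placeholder for the actual dataset repository id;
# trust_remote_code is needed for loading-script datasets on recent `datasets` versions.
builder = load_dataset_builder("user/quakeset", trust_remote_code=True)
print(builder.info.citation)  # prints the BibTeX block defined in _CITATION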
@@ -39,7 +46,7 @@ _LICENSE = "OPENRAIL"
 
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = ["earthquakes.h5"
+_URLS = ["earthquakes.h5"]
 
 
 class QuakeSet(datasets.GeneratorBasedBuilder):
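The removed line was missing its closing bracket; the replacement completes the list literal. For context, a list-valued _URLS is normally handed to the download manager inside _split_generators, which returns local paths in the same order, which is why _generate_examples indexes filepath[0] further down. A sketch of that pattern (illustrative only; the file's actual _split_generators is not part of this diff):

def _split_generators(self, dl_manager):
    # dl_manager.download accepts a list and returns local paths in the same order,
    # so files[0] points at earthquakes.h5.
    files = dl_manager.download(_URLS)
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepath": files, "split": "train"},
        ),
    ]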
@@ -56,19 +63,12 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
     # BUILDER_CONFIG_CLASS = MyBuilderConfig
 
     # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
             name="default",
             version=VERSION,
             description="Default configuration",
-        )
-        datasets.BuilderConfig(
-            name="epicenter",
-            version=VERSION,
-            description="Epicenter configuration",
-        ),
+        )
     ]
 
     DEFAULT_CONFIG_NAME = "default"  # It's not mandatory to have a default configuration. Just use one if it make sense.
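Only the "default" configuration remains after this hunk. A usage sketch with a placeholder repository id:

from datasets import load_dataset

# Placeholder repo id; trust_remote_code may be needed for loading-script datasets.
data = load_dataset("user/quakeset", "default", trust_remote_code=True)

# Requesting the removed configuration would now fail:
# load_dataset("user/quakeset", "epicenter")  # error: unknown config name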
@@ -91,21 +91,6 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
                     "y": datasets.Sequence(datasets.Value("float32"), length=512),
                 }
             )
-        elif self.config.name == "epicenter":
-            features = datasets.Features(
-                {
-                    "sample_key": datasets.Value("string"),  # sample_id
-                    "pre_post_image": datasets.Array3D(
-                        shape=(4, 512, 512), dtype="float32"
-                    ),
-                    "contains_epicenter": datasets.ClassLabel(num_classes=2),
-                    "epsg": datasets.Value("int32"),
-                    "epicenter": datasets.Sequence(datasets.Value("float32"), length=2),
-                    "lon": datasets.Sequence(datasets.Value("float32"), length=512),
-                    "lat": datasets.Sequence(datasets.Value("float32"), length=512),
-                    "affected": datasets.ClassLabel(num_classes=2),
-                }
-            )
 
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
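Only the "default" features block survives this hunk, and its full definition sits outside the shown context. A plausible sketch, inferred from the surviving context lines and the keys yielded in _generate_examples below, not copied from the file:

features = datasets.Features(
    {
        "sample_key": datasets.Value("string"),
        "pre_post_image": datasets.Array3D(shape=(4, 512, 512), dtype="float32"),
        "affected": datasets.ClassLabel(num_classes=2),
        "magnitude": datasets.Value("float32"),
        # the hypocenter length is assumed here (e.g. lat, lon, depth)
        "hypocenter": datasets.Sequence(datasets.Value("float32"), length=3),
        "epsg": datasets.Value("int32"),
        "x": datasets.Sequence(datasets.Value("float32"), length=512),
        "y": datasets.Sequence(datasets.Value("float32"), length=512),
    }
)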
@@ -160,7 +145,6 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        df = pd.read_parquet(filepath[1])
         sample_ids = []
         with h5py.File(filepath[0]) as f:
             for key, patches in f.items():
@@ -194,30 +178,19 @@ class QuakeSet(datasets.GeneratorBasedBuilder):
                         "epsg": attributes["epsg"],
                     }
 
-                    if self.config.name == "default":
-                        resource_id, patch_id = sample_id.split("/")
-                        x = f[resource_id]["x"][...]
-                        y = f[resource_id]["y"][...]
-                        x_start = int(patch_id.split("_")[1]) % (x.shape[0] // 512)
-                        y_start = int(patch_id.split("_")[1]) // (x.shape[0] // 512)
-                        x = x[x_start * 512 : (x_start + 1) * 512]
-                        y = y[y_start * 512 : (y_start + 1) * 512]
-                        item |= {
-                            "affected": label,
-                            "magnitude": np.float32(attributes["magnitude"]),
-                            "hypocenter": attributes["hypocenter"],
-                            "x": x.flatten(),
-                            "y": y.flatten(),
-                        }
-                    elif self.config.name == "epicenter":
-                        selected_infos = df[df["sample_id"] == sample_key]
-                        item |= {
-                            "affected": label,
-                            "contains_epicenter": label == 1
-                            and selected_infos["contains_epicenter"].item(),
-                            "epicenter": selected_infos["epicenter"].item(),
-                            "lon": selected_infos["lon"].item(),
-                            "lat": selected_infos["lat"].item(),
-                        }
+                    resource_id, patch_id = sample_id.split("/")
+                    x = f[resource_id]["x"][...]
+                    y = f[resource_id]["y"][...]
+                    x_start = int(patch_id.split("_")[1]) % (x.shape[0] // 512)
+                    y_start = int(patch_id.split("_")[1]) // (x.shape[0] // 512)
+                    x = x[x_start * 512 : (x_start + 1) * 512]
+                    y = y[y_start * 512 : (y_start + 1) * 512]
+                    item |= {
+                        "affected": label,
+                        "magnitude": np.float32(attributes["magnitude"]),
+                        "hypocenter": attributes["hypocenter"],
+                        "x": x.flatten(),
+                        "y": y.flatten(),
+                    }
 
                     yield sample_key, item
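The added block slices one patch's coordinate vectors out of the full-resource x/y arrays by decoding the patch index embedded in the sample id. A worked example of the index arithmetic, with made-up ids and array sizes:

import numpy as np

# Hypothetical values, chosen only to illustrate the indexing;
# real resource keys, patch ids and array lengths come from earthquakes.h5.
sample_id = "EQ0001/patch_5"
resource_id, patch_id = sample_id.split("/")  # "EQ0001", "patch_5"

x = np.arange(1536, dtype=np.float32)  # stand-in for f[resource_id]["x"][...]
y = np.arange(1536, dtype=np.float32)  # stand-in for f[resource_id]["y"][...]

patches_per_row = x.shape[0] // 512    # 3 patches per axis in this example
idx = int(patch_id.split("_")[1])      # 5
x_start = idx % patches_per_row        # 5 % 3 = 2  (column index)
y_start = idx // patches_per_row       # 5 // 3 = 1 (row index)

x_patch = x[x_start * 512 : (x_start + 1) * 512]  # x[1024:1536], length 512
y_patch = y[y_start * 512 : (y_start + 1) * 512]  # y[512:1024], length 512
print(x_patch.shape, y_patch.shape)               # (512,) (512,)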