DarthReca committed
Commit 6b5bf53
1 Parent(s): fb57c0e

:new: Added dataset loader

Files changed (1)
  1. california_burned_areas.py +194 -0
california_burned_areas.py ADDED
@@ -0,0 +1,194 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from typing import List
+
+ import datasets
+ import h5py
+
+ _CITATION = """\
+ @article{cabuar,
+   title={Ca{B}u{A}r: California {B}urned {A}reas dataset for delineation},
+   author={Rege Cambrin, Daniele and Colomba, Luca and Garza, Paolo},
+   journal={IEEE Geoscience and Remote Sensing Magazine},
+   doi={10.1109/MGRS.2023.3292467},
+   year={2023}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The CaBuAr dataset contains images from Sentinel-2 satellites taken before and after a wildfire.
+ The ground-truth masks are provided by the California Department of Forestry and Fire Protection and are mapped onto the images.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/DarthReca/california_burned_areas"
+
+ _LICENSE = "OPENRAIL"
+
+ _URLS = "raw/patches/512x512.hdf5"
+
+
+ class CaBuArConfig(datasets.BuilderConfig):
+     """BuilderConfig for CaBuAr.
+
+     Parameters
+     ----------
+     load_prefire: bool
+         whether to load pre-fire data
+     train_folds: List[int]
+         list of folds to use for training
+     validation_folds: List[int]
+         list of folds to use for validation
+     test_folds: List[int]
+         list of folds to use for testing
+     **kwargs
+         keyword arguments forwarded to super.
+     """
+
+     def __init__(
+         self,
+         load_prefire: bool,
+         train_folds: List[int],
+         validation_folds: List[int],
+         test_folds: List[int],
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+         self.load_prefire = load_prefire
+         self.train_folds = train_folds
+         self.validation_folds = validation_folds
+         self.test_folds = test_folds
+
+
+ class CaBuAr(datasets.GeneratorBasedBuilder):
+     """California Burned Areas dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         CaBuArConfig(
+             name="post-fire",
+             version=VERSION,
+             description="Post-fire only version of the dataset",
+             load_prefire=False,
+             train_folds=None,
+             validation_folds=None,
+             test_folds=None,
+         ),
+         CaBuArConfig(
+             name="pre-post-fire",
+             version=VERSION,
+             description="Pre-fire and post-fire version of the dataset",
+             load_prefire=True,
+             train_folds=None,
+             validation_folds=None,
+             test_folds=None,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "post-fire"
+
+     def _info(self):
+         # Array3D requires a dtype; uint16 is assumed here, matching Sentinel-2 digital numbers
+         if self.config.name == "pre-post-fire":
+             features = datasets.Features(
+                 {
+                     "post_fire": datasets.Array3D((512, 512, 12), dtype="uint16"),
+                     "pre_fire": datasets.Array3D((512, 512, 12), dtype="uint16"),
+                     "mask": datasets.Array3D((512, 512, 1), dtype="uint16"),
+                 }
+             )
+         else:
+             features = datasets.Features(
+                 {
+                     "post_fire": datasets.Array3D((512, 512, 12), dtype="uint16"),
+                     "mask": datasets.Array3D((512, 512, 1), dtype="uint16"),
+                 }
+             )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Raise ValueError if train_folds, validation_folds or test_folds are not set
+         if (
+             self.config.train_folds is None
+             or self.config.validation_folds is None
+             or self.config.test_folds is None
+         ):
+             raise ValueError("train_folds, validation_folds and test_folds must be set")
+
+         h5_file = dl_manager.download(_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "folds": self.config.train_folds,
+                     "load_prefire": self.config.load_prefire,
+                     "filepath": h5_file,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "folds": self.config.validation_folds,
+                     "load_prefire": self.config.load_prefire,
+                     "filepath": h5_file,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "folds": self.config.test_folds,
+                     "load_prefire": self.config.load_prefire,
+                     "filepath": h5_file,
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, folds: List[int], load_prefire: bool, filepath):
+         with h5py.File(filepath, "r") as f:
+             for uuid, values in f.items():
+                 # Keep only samples belonging to the requested folds
+                 if values.attrs["fold"] not in folds:
+                     continue
+                 # Skip samples without a pre-fire acquisition when one is required
+                 if load_prefire and "pre_fire" not in values:
+                     continue
+                 sample = {
+                     "post_fire": values["post_fire"][...],
+                     "mask": values["mask"][...],
+                 }
+                 if load_prefire:
+                     sample["pre_fire"] = values["pre_fire"][...]
+                 yield uuid, sample
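
For reference, a minimal usage sketch of the loader above. This is a sketch, not part of the commit: the fold ids are placeholders, and all three fold arguments must be set or `_split_generators` raises a ValueError. Extra keyword arguments passed to `load_dataset` are forwarded to the selected `CaBuArConfig`.

```python
from datasets import load_dataset

# Fold ids below are placeholders, not a recommended split.
# Recent versions of datasets may also require trust_remote_code=True.
dataset = load_dataset(
    "DarthReca/california_burned_areas",
    name="post-fire",       # or "pre-post-fire" to also load pre-fire images
    train_folds=[0, 1, 2],
    validation_folds=[3],
    test_folds=[4],
)
```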
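`_generate_examples` implies a specific HDF5 layout: one group per sample uuid, each carrying a `fold` attribute and `post_fire` and `mask` datasets, with `pre_fire` optional. A sketch of a file the loader would accept; the file name, uuid, and uint16 dtype are illustrative assumptions:

```python
import h5py
import numpy as np

# Hypothetical file mirroring the layout _generate_examples expects.
with h5py.File("toy.hdf5", "w") as f:
    group = f.create_group("sample-0000")  # one group per sample uuid
    group.attrs["fold"] = 0                # fold id checked against the split's folds
    group["post_fire"] = np.zeros((512, 512, 12), dtype="uint16")
    group["pre_fire"] = np.zeros((512, 512, 12), dtype="uint16")  # optional
    group["mask"] = np.zeros((512, 512, 1), dtype="uint16")
```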