File size: 2,588 Bytes
67db036
996dd7f
67db036
0e0ef22
ca5a3f5
996dd7f
1fe3878
996dd7f
 
ba0402e
9aa361a
ba0402e
67db036
 
996dd7f
67db036
996dd7f
67db036
 
 
996dd7f
 
 
67db036
 
 
 
 
996dd7f
 
67db036
996dd7f
 
 
 
67db036
 
996dd7f
67db036
 
 
996dd7f
67db036
 
 
 
 
 
 
 
 
996dd7f
 
 
67db036
ba0402e
 
 
 
 
67db036
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import datasets
from datasets import load_dataset


# Names of the 27 constituent remote-sensing image-classification datasets that
# make up SATIN. Each name doubles as a builder-config name and is resolved to a
# 'jonathan-roberts1/<name>' Hugging Face Hub repository by SATINConfig.
# NOTE: list order determines the order of BUILDER_CONFIGS — do not reorder.
_CONSTITUENT_DATASETS = ['SAT-4', 'SAT-6', 'NASC-TG2', 'WHU-RS19', 'RSSCN7', 'RS_C11', 'SIRI-WHU', 'EuroSAT',
                         'NWPU-RESISC45', 'PatternNet', 'RSD46-WHU', 'GID', 'CLRS', 'Optimal-31',
                         'Airbus-Wind-Turbines-Patches', 'USTC_SmokeRS', 'Canadian_Cropland',
                         'Ships-In-Satellite-Imagery', 'Satellite-Images-of-Hurricane-Damage',
                         'Brazilian_Coffee_Scenes', 'Brazilian_Cerrado-Savanna_Scenes', 'Million-AID',
                         'UC_Merced_LandUse_MultiLabel', 'MLRSNet',
                         'MultiScene', 'RSI-CB256', 'AID_MultiLabel']


class SATINConfig(datasets.BuilderConfig):
    """BuilderConfig for one SATIN constituent dataset.

    Maps a constituent-dataset name to the Hugging Face Hub repository
    it is hosted under ('jonathan-roberts1/<name>').
    """

    def __init__(self, name, **kwargs):
        """
        Args:
            name: Name of the constituent dataset; also used as the config name.
            **kwargs: Forwarded to ``datasets.BuilderConfig``.
        """
        # Forward the name to the parent so the config is registered under it,
        # rather than letting the parent assign a default and overwriting it.
        super().__init__(name=name, version=datasets.Version("1.0.0"), **kwargs)
        self.name = name
        # Hub repository the constituent dataset is loaded from.
        self.hf_dataset_name = f"jonathan-roberts1/{name}"
        # Populated lazily (cached) the first time SATIN._info() runs.
        self.description = None
        self.features = None


class SATIN(datasets.GeneratorBasedBuilder):
    """SATIN images dataset builder.

    Exposes each constituent dataset as a separate builder config and proxies
    the data from its Hugging Face Hub repository.
    """

    # Every upstream constituent dataset is fetched from its 'train' split.
    # (Was previously an unused local in _generate_examples while 'train'
    # was hardcoded in two places.)
    _DEFAULT_SPLIT = 'train'

    BUILDER_CONFIGS = [SATINConfig(name=dataset_name) for dataset_name in _CONSTITUENT_DATASETS]

    def _info(self):
        """Return DatasetInfo for the selected config.

        Description and features are fetched once by streaming the upstream
        dataset's metadata (no full download) and cached on the config.
        """
        if self.config.description is None or self.config.features is None:
            stream_dataset_info = load_dataset(
                self.config.hf_dataset_name, streaming=True, split=self._DEFAULT_SPLIT
            ).info
            self.config.description = stream_dataset_info.description
            self.config.features = stream_dataset_info.features
        return datasets.DatasetInfo(
            description=self.config.description,
            features=self.config.features,
        )

    def _split_generators(self, dl_manager):
        """Download the upstream dataset and expose it as a single TRAIN split.

        Args:
            dl_manager: Unused; data is fetched via ``load_dataset`` instead.
        """
        dataset = load_dataset(self.config.hf_dataset_name)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_path": dataset},
            ),
        ]

    def _generate_examples(self, data_path):
        """Yield ``(idx, example)`` pairs from the upstream dataset.

        Args:
            data_path: The DatasetDict returned by ``load_dataset`` in
                ``_split_generators``.

        Yields:
            Tuples of (int index, dict of features) with 'image' moved to be
            the first key of each example dict.
        """
        huggingface_dataset = data_path[self._DEFAULT_SPLIT]
        features = huggingface_dataset.features
        for idx, row in enumerate(huggingface_dataset):
            features_dict = {feature: row[feature] for feature in features}
            # Reorder features to make image the first feature
            image = features_dict.pop('image')
            features_dict = {'image': image, **features_dict}
            yield idx, features_dict