NimaBoscarino committed on
Commit
fb5267f
1 Parent(s): c1c4bf0

Create lila.py

Files changed (1): lila.py (+187, -0)
lila.py ADDED
@@ -0,0 +1,187 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LILA: camera-trap datasets from the Labeled Information Library of Alexandria (lila.science)."""


import json

import datasets
import pandas as pd

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

_DESCRIPTION = """\
LILA (Labeled Information Library of Alexandria) is a collection of camera-trap datasets
for biology and conservation, spanning collections such as Caltech Camera Traps, ENA24,
and the Snapshot Safari series.
"""

_HOMEPAGE = "https://lila.science"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

_LILA_SAS_URLS = pd.read_csv("https://lila.science/wp-content/uploads/2020/03/lila_sas_urls.txt")
_LILA_SAS_URLS.rename(columns={"# name": "name"}, inplace=True)
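# The table has one row per LILA dataset; the code below relies on at least the "name"
# and "image_base_url" columns being present.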

# How do I make these point to the particular commit ID?
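# (One option, assuming the standard Hub URL scheme: "resolve" accepts any revision, so
# "main" can be swapped for a commit SHA, e.g. .../resolve/<commit_sha>/data/ENA24.jsonl.)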
_LILA_URLS = {
    "Caltech Camera Traps": "https://huggingface.co/datasets/NimaBoscarino/LILA/resolve/main/data/Caltech_Camera_Traps.jsonl",
    "ENA24": "https://huggingface.co/datasets/NimaBoscarino/LILA/resolve/main/data/ENA24.jsonl",
    "Missouri Camera Traps": "",
    "NACTI": "",
    "WCS Camera Traps": "",
    "Wellington Camera Traps": "",
    "Island Conservation Camera Traps": "",
    "Channel Islands Camera Traps": "",
    "Idaho Camera Traps": "",
    "Snapshot Serengeti": "",
    "Snapshot Karoo": "",
    "Snapshot Kgalagadi": "",
    "Snapshot Enonkishu": "",
    "Snapshot Camdeboo": "",
    "Snapshot Mountain Zebra": "",
    "Snapshot Kruger": "",
    "SWG Camera Traps": "",
    "Orinoquia Camera Traps": "",
}
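# NOTE: only Caltech Camera Traps and ENA24 have metadata files uploaded so far; the
# configs whose URL is still an empty string cannot be loaded yet.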


class LILAConfig(datasets.BuilderConfig):
    """BuilderConfig for LILA."""

    def __init__(self, image_base_url, metadata_url, **kwargs):
        """BuilderConfig for LILA.

        Args:
            image_base_url: base URL that each example's file_name is appended to.
            metadata_url: URL of the JSONL metadata file for this configuration.
            **kwargs: keyword arguments forwarded to super.
        """
        super(LILAConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.image_base_url = image_base_url
        self.metadata_url = metadata_url


class LILA(datasets.GeneratorBasedBuilder):
    """Camera-trap image datasets from LILA (Labeled Information Library of Alexandria)."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        LILAConfig(
            name=row.name,
            # description="TODO: Description",
            image_base_url=row.image_base_url,
            metadata_url=_LILA_URLS[row.name],
        )
        for row in _LILA_SAS_URLS.itertuples()
    ]

    def _get_features(self) -> datasets.Features:
        # TODO: Use ClassLabel for categories...
        # TODO: Deal with 404s -> In my manual preprocessing, or in the datasets library?
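        # (A sketch for the ClassLabel TODO, using hypothetical label names:
        #     "category_id": datasets.ClassLabel(names=["empty", "deer", "raccoon"])
        # would replace the plain int32 category_id used below.)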

        # NOTE: match/case requires Python 3.10+.
        match self.config.name:
            case "Caltech Camera Traps":
                return datasets.Features({
                    "id": datasets.Value("string"),
                    "file_name": datasets.Value("string"),
                    "width": datasets.Value("int32"),
                    "height": datasets.Value("int32"),
                    "seq_num_frames": datasets.Value("int32"),
                    "date_captured": datasets.Value("date32"),
                    "seq_id": datasets.Value("string"),
                    "location": datasets.Value("string"),
                    "rights_holder": datasets.Value("string"),
                    "frame_num": datasets.Value("int32"),
                    "annotations": datasets.Sequence({
                        "id": datasets.Value("string"),
                        "category_id": datasets.Value("int32"),
                    }),
                    "bboxes": datasets.Sequence({
                        "id": datasets.Value("string"),
                        "category_id": datasets.Value("int32"),
                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                    }),
                    "image": datasets.Image(decode=False),
                })
            case "ENA24":
                return datasets.Features({
                    "id": datasets.Value("string"),
                    "file_name": datasets.Value("string"),
                    "width": datasets.Value("int32"),
                    "height": datasets.Value("int32"),
                    "annotations": datasets.Sequence({
                        "id": datasets.Value("string"),
                        "category_id": datasets.Value("int32"),
                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                    }),
                    "image": datasets.Image(decode=False),
                })
            case _:
                # Only the two configurations above have features defined so far; fail
                # loudly instead of implicitly returning None for the rest.
                raise NotImplementedError(f"No features defined for config: {self.config.name}")

    def _info(self):
        features = self._get_features()

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,  # Defined in _get_features because they differ between configurations.
            # If there's a common (input, target) tuple from the features, uncomment the
            # supervised_keys line below and specify them. They'll be used if
            # as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The metadata file is a plain JSONL download (not an archive), so
        # download_and_extract simply returns the local cached path.
        metadata_path = dl_manager.download_and_extract(self.config.metadata_url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": metadata_path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                example = json.loads(line)
                image_url = f"{self.config.image_base_url}/{example['file_name']}"
                yield example["id"], {
                    **example,
                    "image": image_url,
                }
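
A minimal usage sketch (assuming this script is loaded from the NimaBoscarino/LILA repo; because the "image" feature is declared with decode=False, each example carries the image URL rather than decoded pixels, so images can be fetched on demand):

    from io import BytesIO

    import requests
    from datasets import load_dataset
    from PIL import Image

    ds = load_dataset("NimaBoscarino/LILA", "ENA24", split="train")
    sample = ds[0]
    url = sample["image"]["path"]  # decode=False stores {"path": <URL>, "bytes": None}
    img = Image.open(BytesIO(requests.get(url, timeout=30).content))  # fetch one image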