# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""Loading script for the LILA (lila.science) camera trap datasets."""


import json
import os

import pandas as pd

import datasets


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# The LILA site publishes a CSV listing each dataset's name and base URLs (including the
# image base URL used to build image links below).
_LILA_SAS_URLS = pd.read_csv("https://lila.science/wp-content/uploads/2020/03/lila_sas_urls.txt")
_LILA_SAS_URLS.rename(columns={"# name": "name"}, inplace=True)

# Preprocessed metadata files hosted on the Hugging Face Hub.
_METADATA_BASE_URL = "https://huggingface.co/datasets/NimaBoscarino/LILA/resolve/main/data/"  # How do I make these point to the particular commit ID?

_LILA_URLS = {
    "Caltech Camera Traps": "Caltech_Camera_Traps.jsonl",
    "ENA24": "ENA24.jsonl",
    "Missouri Camera Traps": "Missouri_Camera_Traps.jsonl",
    "NACTI": "NACTI.jsonl.zip",
    "WCS Camera Traps": "WCS_Camera_Traps.jsonl.zip",
    "Wellington Camera Traps": "Wellington_Camera_Traps.jsonl.zip",
    "Island Conservation Camera Traps": "Island_Conservation_Camera_Traps.jsonl.zip",
    "Channel Islands Camera Traps": "Channel_Islands_Camera_Traps.jsonl.zip",
    "Idaho Camera Traps": "Idaho_Camera_Traps.jsonl.zip",
    "Snapshot Serengeti": "Snapshot_Serengeti.jsonl.zip",
    "Snapshot Karoo": "Snapshot_Karoo.jsonl.zip",
    "Snapshot Kgalagadi": "Snapshot_Kgalagadi.jsonl",
    "Snapshot Enonkishu": "Snapshot_Enonkishu.jsonl.zip",
    "Snapshot Camdeboo": "Snapshot_Camdeboo.jsonl.zip",
    "Snapshot Mountain Zebra": "Snapshot_Mountain_Zebra.jsonl.zip",
    "Snapshot Kruger": "Snapshot_Kruger.jsonl",
    "SWG Camera Traps": "SWG_Camera_Traps.jsonl.zip",
    "Orinoquia Camera Traps": "Orinoquia_Camera_Traps.jsonl.zip",
}


class LILAConfig(datasets.BuilderConfig):
    """BuilderConfig for LILA."""

    def __init__(self, image_base_url, metadata_url, **kwargs):
        """BuilderConfig for LILA.

        Args:
            image_base_url: base URL under which this dataset's images are hosted.
            metadata_url: URL of the preprocessed metadata file (.jsonl, optionally zipped).
            **kwargs: keyword arguments forwarded to super.
""" super(LILAConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs) self.image_base_url = image_base_url self.metadata_url = metadata_url class LILA(datasets.GeneratorBasedBuilder): """TODO: Short description of my dataset.""" VERSION = datasets.Version("1.1.0") BUILDER_CONFIGS = [ LILAConfig( name=row.name, # description="TODO: Description", image_base_url=row.image_base_url, metadata_url=_METADATA_BASE_URL + _LILA_URLS[row.name] ) for row in _LILA_SAS_URLS.itertuples() ] def _get_features(self) -> datasets.Features: # TODO: Use ClassLabel for categories... # TODO: Deal with 404s -> In my manual preprocessing, or in the datasets library? if self.config.name == 'Caltech Camera Traps': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "width": datasets.Value("int32"), "height": datasets.Value("int32"), "seq_num_frames": datasets.Value("int32"), "date_captured": datasets.Value("date32"), "seq_id": datasets.Value("string"), "location": datasets.Value("string"), "rights_holder": datasets.Value("string"), "frame_num": datasets.Value("int32"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), }), "bboxes": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "bbox": datasets.Sequence(datasets.Value("float32"), length=4), }), "image": datasets.Image(decode=False), }) elif self.config.name == 'ENA24': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "width": datasets.Value("int32"), "height": datasets.Value("int32"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "bbox": datasets.Sequence(datasets.Value("float32"), length=4), }), "image": datasets.Image(decode=False), }) elif self.config.name == 'Missouri Camera Traps': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "width": datasets.Value("int32"), "height": datasets.Value("int32"), "seq_id": datasets.Value("string"), "seq_num_frames": datasets.Value("int32"), "frame_num": datasets.Value("int32"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "sequence_level_annotation": datasets.Value("bool"), "bbox": datasets.Sequence(datasets.Value("float32"), length=4), }), "image": datasets.Image(decode=False), }) elif self.config.name == 'NACTI': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "width": datasets.Value("int32"), "height": datasets.Value("int32"), "study": datasets.Value("string"), "location": datasets.Value("string"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), }), "bboxes": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "bbox": datasets.Sequence(datasets.Value("float32"), length=4), }), "image": datasets.Image(decode=False), }) elif self.config.name == 'WCS Camera Traps': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "width": datasets.Value("int32"), "height": datasets.Value("int32"), "wcs_id": datasets.Value("string"), "location": datasets.Value("string"), "frame_num": datasets.Value("int32"), "match_level": datasets.Value("int32"), "seq_id": datasets.Value("string"), "country_code": datasets.Value("string"), "seq_num_frames": datasets.Value("int32"), "status": 
datasets.Value("string"), "datetime": datasets.Value("date32"), "corrupt": datasets.Value("bool"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "count": datasets.Value("int32"), "sex": datasets.Value("string"), "age": datasets.Value("string"), }), "bboxes": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "bbox": datasets.Sequence(datasets.Value("float32"), length=4), }), "image": datasets.Image(decode=False), }) elif self.config.name == 'Wellington Camera Traps': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "width": datasets.Value("int32"), "height": datasets.Value("int32"), "frame_num": datasets.Value("int32"), "seq_id": datasets.Value("string"), "site": datasets.Value("string"), "camera": datasets.Value("string"), "datetime": datasets.Value("date32"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), }), "image": datasets.Image(decode=False), }) elif self.config.name == 'Island Conservation Camera Traps': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "width": datasets.Value("int32"), "height": datasets.Value("int32"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "bbox": datasets.Sequence(datasets.Value("float32"), length=4), }), "image": datasets.Image(decode=False), }) elif self.config.name == 'Channel Islands Camera Traps': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "width": datasets.Value("int32"), "height": datasets.Value("int32"), "frame_num": datasets.Value("int32"), "seq_id": datasets.Value("string"), "seq_num_frames": datasets.Value("int32"), "original_relative_path": datasets.Value("string"), "location": datasets.Value("string"), "temperature": datasets.Value("string"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "sequence_level_annotation": datasets.Value("bool"), "bbox": datasets.Sequence(datasets.Value("float32"), length=4), }), "image": datasets.Image(decode=False), }) elif self.config.name == 'Idaho Camera Traps': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "frame_num": datasets.Value("int32"), "seq_id": datasets.Value("string"), "seq_num_frames": datasets.Value("int32"), "original_relative_path": datasets.Value("string"), "datetime": datasets.Value("date32"), "location": datasets.Value("string"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "sequence_level_annotation": datasets.Value("bool"), }), "image": datasets.Image(decode=False), }) elif self.config.name == 'Snapshot Serengeti': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "frame_num": datasets.Value("int32"), "seq_id": datasets.Value("string"), "width": datasets.Value("int32"), "height": datasets.Value("int32"), "seq_num_frames": datasets.Value("int32"), "datetime": datasets.Value("date32"), "corrupt": datasets.Value("bool"), "location": datasets.Value("string"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "sequence_level_annotation": datasets.Value("bool"), "seq_id": datasets.Value("string"), "season": datasets.Value("string"), "datetime": 
datasets.Value("date32"), "subject_id": datasets.Value("string"), "count": datasets.Value("string"), "standing": datasets.Value("float32"), "resting": datasets.Value("float32"), "moving": datasets.Value("float32"), "interacting": datasets.Value("float32"), "young_present": datasets.Value("float32"), "location": datasets.Value("string"), }), "bboxes": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "bbox": datasets.Sequence(datasets.Value("float32"), length=4), }), "image": datasets.Image(decode=False), }) elif self.config.name in [ 'Snapshot Karoo', 'Snapshot Kgalagadi', 'Snapshot Enonkishu', 'Snapshot Camdeboo', 'Snapshot Mountain Zebra', 'Snapshot Kruger' ]: return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "frame_num": datasets.Value("int32"), "seq_id": datasets.Value("string"), "width": datasets.Value("int32"), "height": datasets.Value("int32"), "seq_num_frames": datasets.Value("int32"), "datetime": datasets.Value("date32"), "corrupt": datasets.Value("bool"), "location": datasets.Value("string"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "category_id": datasets.Value("int32"), "sequence_level_annotation": datasets.Value("bool"), "seq_id": datasets.Value("string"), "season": datasets.Value("string"), "datetime": datasets.Value("date32"), "subject_id": datasets.Value("string"), "count": datasets.Value("string"), "standing": datasets.Value("float32"), "resting": datasets.Value("float32"), "moving": datasets.Value("float32"), "interacting": datasets.Value("float32"), "young_present": datasets.Value("float32"), "location": datasets.Value("string"), }), "image": datasets.Image(decode=False), }) elif self.config.name == 'Orinoquia Camera Traps': return datasets.Features({ "id": datasets.Value("string"), "file_name": datasets.Value("string"), "frame_num": datasets.Value("int32"), "seq_id": datasets.Value("string"), "seq_num_frames": datasets.Value("int32"), "datetime": datasets.Value("date32"), "location": datasets.Value("string"), "annotations": datasets.Sequence({ "id": datasets.Value("string"), "sequence_level_annotation": datasets.Value("bool"), "category_id": datasets.Value("int32"), }), "image": datasets.Image(decode=False), }) def _info(self): features = self._get_features() return datasets.DatasetInfo( # This is the description that will appear on the datasets page. description=_DESCRIPTION, # This defines the different columns of the dataset and their types features=features, # Here we define them above because they are different between the two configurations # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and # specify them. They'll be used if as_supervised=True in builder.as_dataset. 
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the metadata file; zipped metadata is extracted automatically.
        archive_path = dl_manager.download_and_extract(self.config.metadata_url)

        if self.config.metadata_url.endswith(".zip"):
            # For archives, download_and_extract returns the extraction directory;
            # each archive contains a single .jsonl metadata file.
            archive_path = os.path.join(archive_path, os.listdir(archive_path)[0])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": archive_path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # The metadata file holds one JSON object per line (one image per line).
        with open(filepath) as f:
            for line in f:
                example = json.loads(line)
                # Images are not downloaded here; each example carries the image URL,
                # and the Image feature is declared with decode=False.
                image_url = f"{self.config.image_base_url}/{example['file_name']}"

                yield example["id"], {
                    **example,
                    "image": image_url,
                }
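

# A minimal usage sketch (an assumption, not part of this script): it presumes this loading
# script lives in the same Hub repo that hosts the preprocessed metadata, i.e.
# "NimaBoscarino/LILA" from _METADATA_BASE_URL above.
#
#     from datasets import load_dataset
#
#     # Pick any configuration name from _LILA_URLS, e.g. "ENA24".
#     dataset = load_dataset("NimaBoscarino/LILA", "ENA24", split="train")
#
#     sample = dataset[0]
#     # Because the Image feature is declared with decode=False, sample["image"] holds the
#     # image reference (URL) rather than a decoded PIL image.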