File size: 4,000 Bytes
3c33491
 
7f97f62
 
 
 
 
 
3c33491
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7f97f62
 
3c33491
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7f97f62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3c33491
7f97f62
 
3c33491
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
"""Graptoloidea Specimens dataset."""

import os
import random
import shutil
import zipfile
from typing import List

import datasets
import numpy as np
import pandas as pd

# BibTeX citation for the dataset. Currently empty — fill in once the
# dataset has a citable publication.
_CITATION = """\

"""



# Long-form description surfaced by the `datasets` library (e.g. on the Hub).
_DESCRIPTION = """\
Dataset Description:
The Graptoloidea Specimens Imaging dataset is a curated collection of over 1,300 image-text pairs, focusing on Graptoloidea specimens. It encompasses detailed attributes such as species classification, geological stages, and specific locality information (with coordinates), complemented by high-quality images of each specimen. This dataset serves as a valuable resource for paleontological research, offering insights into the morphological diversity and geological distribution of Graptoloidea.

Highlights:
- Comprehensive Collection: Over 1,300 unique specimens, each with a corresponding high-quality image and descriptive text.
- Diverse Geological Coverage: Specimens span different geological stages, offering a timeline of the Graptoloidea evolution.
- Rich Annotations: Apart from visual data, the dataset includes detailed taxonomic classification, geological context, and precise locality information.
- Research-Ready: Ideal for tasks like paleontological classification, morphological analysis, age estimation, and geographical distribution studies.
- Educational Value: Serves as an invaluable resource for educational and outreach programs, providing tangible insights into paleontology.
"""


# Landing page of the original data deposit on Zenodo.
_HOMEPAGE = "https://zenodo.org/records/6194943"


# NOTE(review): `datasets` scripts conventionally name this `_LICENSE`
# (upper case); left unchanged in case other (unseen) code references it.
# Currently empty — confirm the license stated on the Zenodo record.
_license = ""


# The archive is published as a two-part split zip (.001/.002); both parts
# must be downloaded and byte-concatenated before extraction.
_URLS = {
    "part1": "https://zenodo.org/records/6194943/files/graptolite%20specimens%20with%20scale.zip.001?download=1",
    "part2": "https://zenodo.org/records/6194943/files/graptolite%20specimens%20with%20scale.zip.002?download=1",
}


class GraptoloideaSpecimensDataset(datasets.GeneratorBasedBuilder):
    """Imaging for graptoloidea specimens with extra information"""

    def _info(self):
        """Return the dataset metadata and per-example feature schema.

        The original body was indented at the same column as the ``def``
        line (an IndentationError that prevented the module from being
        imported); it is re-indented here with no other logic change.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "Suborder": datasets.Value("string"),
                    "Infraorder": datasets.Value("string"),
                    "Family (Subfamily)": datasets.Value("string"),
                    "Genus": datasets.Value("string"),
                    "Tagged Species Name": datasets.Value("string"),
                    "Image": datasets.Value("string"),
                    "Stage": datasets.Value("string"),
                    "Mean Age Value": datasets.Value("float64"),
                    "Locality (Longitude, Latitude, Horizon)": datasets.Value("string"),
                    "Reference (Specimens Firstly Published)": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download both zip parts, reassemble and extract them, then
        split the extracted files 80/20 into train/validation.

        Returns:
            A list of two ``SplitGenerator``s (TRAIN and VALIDATION), each
            passing its file list to ``_generate_examples`` via the
            ``filepath`` keyword.
        """
        downloaded_files = dl_manager.download(_URLS)
        # NOTE(review): `manual_dir` is only populated when the user passes
        # `data_dir=...`; otherwise it is None and every join below fails.
        # Presumably callers always supply it — confirm, or switch to a
        # dedicated cache/extract directory.
        work_dir = dl_manager.manual_dir
        combined_zip_path = os.path.join(work_dir, 'combined.zip')
        # The archive is distributed as a split zip (.001/.002): rebuilding
        # it is a straight byte-concatenation of the parts in order.
        # copyfileobj streams in chunks instead of loading each part
        # entirely into memory (the original did `f_out.write(f_in.read())`).
        with open(combined_zip_path, 'wb') as f_out:
            for part in ['part1', 'part2']:
                with open(downloaded_files[part], 'rb') as f_in:
                    shutil.copyfileobj(f_in, f_out)
        with zipfile.ZipFile(combined_zip_path, 'r') as zip_ref:
            zip_ref.extractall(work_dir)
        all_files = [
            os.path.join(work_dir, f)
            for f in os.listdir(work_dir)
            if os.path.isfile(os.path.join(work_dir, f))
        ]
        # Deterministic ordering before the split: a fixed-seed local RNG
        # makes the train/validation partition reproducible across runs
        # (the original unseeded shuffle produced a different split every
        # time the script ran) and leaves the global `random` state alone.
        all_files.sort()
        random.Random(42).shuffle(all_files)
        split_index = int(0.8 * len(all_files))
        train_files = all_files[:split_index]
        validation_files = all_files[split_index:]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": validation_files}),
        ]