File size: 5,509 Bytes
3c33491
 
7f97f62
 
 
 
 
 
d635639
 
 
 
3c33491
 
18e3316
 
 
 
 
 
 
 
3c33491
 
18e3316
 
 
3c33491
 
 
 
18e3316
 
 
3c33491
18e3316
 
 
 
 
 
 
 
3c33491
d635639
 
 
 
 
 
 
 
18e3316
 
d635639
18e3316
d635639
18e3316
d635639
 
 
 
18e3316
 
d635639
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18e3316
d635639
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54776a7
 
d635639
54776a7
d635639
54776a7
d635639
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
"""Graptoloidea Specimens dataset."""

import os
import random
from typing import List
import datasets
import pandas as pd
import numpy as np
import csv
import logging
from PIL import Image
import ast

_CITATION = """\
@dataset{Xu2022graptolitespecimens,
  title = {High-resolution images of 1550 Ordovician to Silurian graptolite specimens for global correlation and shale gas exploration},
  author = {Honghe Xu},
  year = {2022},
  url = {https://zenodo.org/records/6194943},
  publisher = {Zenodo}
}
"""

_DESCRIPTION = """\
This dataset includes high-quality images of specimens, each meticulously tagged with taxonomic details such as suborder, infraorder, family, and genus. 
Additionally, the dataset is enriched with crucial metadata like the geological stage, mean age value, and specific locality coordinates (longitude, latitude, and horizon). 
References to original specimen publications are also provided, ensuring comprehensive documentation for academic rigor.
"""

_HOMEPAGE = "https://zenodo.org/records/6194943"

_LICENSE = "CC BY 4.0"

_URL = "https://raw.githubusercontent.com/LeoZhangzaolin/photos/main/Final_GS_with_Images5.csv"

class GraptoloideaSpecimensDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Graptoloidea Specimens dataset.

    Downloads a processed CSV of graptolite specimen records (taxonomy,
    image reference, geological stage, mean age, locality, and original
    publication reference), shuffles it reproducibly, and exposes
    70% / 15% / 15% train / test / validation splits.
    """

    _URL = _URL
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the DatasetInfo describing features, license, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "Suborder": datasets.Value("string"),
                    "Infraorder": datasets.Value("string"),
                    "Family (Subfamily)": datasets.Value("string"),
                    "Genus": datasets.Value("string"),
                    "tagged species name": datasets.Value("string"),
                    "image": datasets.Value("string"),
                    "Stage": datasets.Value("string"),
                    "mean age value": datasets.Value("float64"),
                    "Locality (Longitude, Latitude, Horizon)": datasets.Value("string"),
                    "Reference (specimens firstly published)": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the source CSV and produce train/test/validation splits.

        The data is shuffled with a fixed seed so the split membership is
        reproducible across runs, then each slice is written to a CSV in a
        fresh temporary directory and handed to ``_generate_examples`` via
        ``gen_kwargs``.
        """
        # Local import: only this method needs tempfile, and it keeps the
        # module-level import block unchanged.
        import tempfile

        downloaded_file = dl_manager.download_and_extract(self._URL)

        # Fixed random_state makes the shuffle (and hence the splits)
        # deterministic; the previous unseeded shuffle produced different
        # splits on every load.
        df = pd.read_csv(downloaded_file)
        df = df.sample(frac=1, random_state=42).reset_index(drop=True)

        # 70% train, 15% test, remainder (~15%) validation.
        train_size = int(0.7 * len(df))
        test_size = int(0.15 * len(df))

        train_df = df[:train_size]
        test_df = df[train_size:train_size + test_size]
        validation_df = df[train_size + test_size:]

        # A per-run temporary directory avoids the hard-coded /tmp paths,
        # which were not portable to Windows and could collide between
        # concurrent runs.
        tmp_dir = tempfile.mkdtemp(prefix="graptoloidea_splits_")
        train_file = os.path.join(tmp_dir, "train_split.csv")
        test_file = os.path.join(tmp_dir, "test_split.csv")
        validation_file = os.path.join(tmp_dir, "validation_split.csv")

        train_df.to_csv(train_file, index=False)
        test_df.to_csv(test_file, index=False)
        validation_df.to_csv(validation_file, index=False)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_file}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_file}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": validation_file}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one split CSV.

        String fields are stripped of surrounding whitespace. The mean age
        is converted to ``float`` so it matches the declared ``float64``
        feature instead of being passed through as a raw CSV string.
        Keys are 1-based, matching the original counter behavior.
        """
        logging.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for key, row in enumerate(reader, start=1):
                yield key, {
                    "Suborder": row["Suborder"].strip(),
                    "Infraorder": row["Infraorder"].strip(),
                    "Family (Subfamily)": row["Family (Subfamily)"].strip(),
                    "Genus": row["Genus"].strip(),
                    "tagged species name": row["tagged species name"].strip(),
                    "image": row["image"].strip(),
                    "Stage": row["Stage"].strip(),
                    # Explicit cast: the feature schema declares float64.
                    # Assumes the column is always a parseable number —
                    # TODO(review): confirm no blank cells in the source CSV.
                    "mean age value": float(row["mean age value"]),
                    "Locality (Longitude, Latitude, Horizon)": row["Locality (Longitude, Latitude, Horizon)"].strip(),
                    "Reference (specimens firstly published)": row["Reference (specimens firstly published)"].strip(),
                }