"""Graptoloidea Specimens dataset."""
import os
import random
import zipfile
from typing import List

import datasets
import numpy as np
import pandas as pd
_CITATION = """\
"""
_DESCRIPTION = """\
Dataset Description:
The Graptoloidea Specimens Imaging dataset is a curated collection of over 1,300 image-text pairs, focusing on Graptoloidea specimens. It encompasses detailed attributes such as species classification, geological stages, and specific locality information (with coordinates), complemented by high-quality images of each specimen. This dataset serves as a valuable resource for paleontological research, offering insights into the morphological diversity and geological distribution of Graptoloidea.
Highlights:
- Comprehensive Collection: Over 1,300 unique specimens, each with a corresponding high-quality image and descriptive text.
- Diverse Geological Coverage: Specimens span different geological stages, offering a timeline of the Graptoloidea evolution.
- Rich Annotations: Apart from visual data, the dataset includes detailed taxonomic classification, geological context, and precise locality information.
- Research-Ready: Ideal for tasks like paleontological classification, morphological analysis, age estimation, and geographical distribution studies.
- Educational Value: Serves as an invaluable resource for educational and outreach programs, providing tangible insights into paleontology.
"""
_HOMEPAGE = "https://zenodo.org/records/6194943"
_license = ""
_URLS = {
    "part1": "https://zenodo.org/records/6194943/files/graptolite%20specimens%20with%20scale.zip.001?download=1",
    "part2": "https://zenodo.org/records/6194943/files/graptolite%20specimens%20with%20scale.zip.002?download=1",
}
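# A minimal usage sketch (not part of the loading logic itself). The Hub
# repository id below is an assumption based on the dataset name, and
# `data_dir` must point to a writable directory because the split archive is
# combined and extracted there:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("LeoZhangzaolin/Graptoloidea-Specimens-Imaging",
#                     data_dir="path/to/working_dir")
#   print(ds["train"][0]["Tagged Species Name"])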
class GraptoloideaSpecimensDataset(datasets.GeneratorBasedBuilder):
    """Images of Graptoloidea specimens with taxonomic, geological, and locality metadata."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "Suborder": datasets.Value("string"),
                    "Infraorder": datasets.Value("string"),
                    "Family (Subfamily)": datasets.Value("string"),
                    "Genus": datasets.Value("string"),
                    "Tagged Species Name": datasets.Value("string"),
                    # Path to the specimen image file (stored as a string, not as a datasets.Image feature).
                    "Image": datasets.Value("string"),
                    "Stage": datasets.Value("string"),
                    "Mean Age Value": datasets.Value("float64"),
                    "Locality (Longitude, Latitude, Horizon)": datasets.Value("string"),
                    "Reference (Specimens Firstly Published)": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Download both parts of the split zip archive from Zenodo.
        downloaded_files = dl_manager.download(_URLS)

        # Concatenate the two parts into a single archive. dl_manager.manual_dir
        # must be a writable directory (the `data_dir` passed at load time).
        combined_zip_path = os.path.join(dl_manager.manual_dir, "combined.zip")
        with open(combined_zip_path, "wb") as f_out:
            for part in ["part1", "part2"]:
                with open(downloaded_files[part], "rb") as f_in:
                    f_out.write(f_in.read())

        # Extract the specimen images next to the combined archive.
        with zipfile.ZipFile(combined_zip_path, "r") as zip_ref:
            zip_ref.extractall(dl_manager.manual_dir)

        # Collect the extracted image files, excluding the zip archive itself.
        all_files = [
            os.path.join(dl_manager.manual_dir, f)
            for f in os.listdir(dl_manager.manual_dir)
            if os.path.isfile(os.path.join(dl_manager.manual_dir, f)) and not f.endswith(".zip")
        ]

        # Shuffle with a fixed seed so the 80/20 train/validation split is reproducible.
        random.seed(42)
        random.shuffle(all_files)
        split_index = int(0.8 * len(all_files))
        train_files = all_files[:split_index]
        validation_files = all_files[split_index:]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": validation_files}),
        ]
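
    # GeneratorBasedBuilder also requires a `_generate_examples` method, which is
    # not part of this excerpt. Below is a minimal illustrative sketch only: the
    # metadata file name ("metadata.csv") and the assumption that its columns
    # match the feature names declared in `_info` are hypothetical.
    def _generate_examples(self, filepath):
        # `filepath` is the list of image paths produced by `_split_generators`.
        metadata_path = os.path.join(os.path.dirname(filepath[0]), "metadata.csv")  # hypothetical file name
        metadata = pd.read_csv(metadata_path).set_index("Image")
        for idx, image_path in enumerate(filepath):
            # Look up the row whose "Image" entry matches this file's name (assumed layout).
            row = metadata.loc[os.path.basename(image_path)]
            example = {column: row[column] for column in metadata.columns}
            example["Image"] = image_path
            yield idx, example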