import struct

import numpy as np

import datasets
from datasets.tasks import ImageClassification


_CITATION = R"""
@article{DBLP:journals/corr/abs-1812-01718,
  author     = {Tarin Clanuwat and
                Mikel Bober{-}Irizar and
                Asanobu Kitamoto and
                Alex Lamb and
                Kazuaki Yamamoto and
                David Ha},
  title      = {Deep Learning for Classical Japanese Literature},
  journal    = {CoRR},
  volume     = {abs/1812.01718},
  year       = {2018},
  url        = {http://arxiv.org/abs/1812.01718},
  eprinttype = {arXiv},
  eprint     = {1812.01718},
  timestamp  = {Thu, 14 Oct 2021 09:15:14 +0200},
  biburl     = {https://dblp.org/rec/journals/corr/abs-1812-01718.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
"""

# Gzipped IDX files in the original MNIST layout, loaded from the local ./raw/ directory.
_URL = "./raw/"
_URLS = {
    "train_images": "train-images-idx3-ubyte.gz",
    "train_labels": "train-labels-idx1-ubyte.gz",
    "test_images": "t10k-images-idx3-ubyte.gz",
    "test_labels": "t10k-labels-idx1-ubyte.gz",
}


class KMNIST(datasets.GeneratorBasedBuilder):
    """Kuzushiji-MNIST (KMNIST): a drop-in replacement for MNIST with ten classes of cursive Japanese (kuzushiji) characters."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="kmnist",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.features.ClassLabel(
                        names=[
                            "お",
                            "き",
                            "す",
                            "つ",
                            "な",
                            "は",
                            "ま",
                            "や",
                            "れ",
                            "を",
                        ]
                    ),
                }
            ),
            supervised_keys=("image", "label"),
            homepage="https://github.com/rois-codh/kmnist",
            citation=_CITATION,
            task_templates=[
                ImageClassification(
                    image_column="image",
                    label_column="label",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        urls_to_download = {key: _URL + fname for key, fname in _URLS.items()}
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": (
                        downloaded_files["train_images"],
                        downloaded_files["train_labels"],
                    ),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": (
                        downloaded_files["test_images"],
                        downloaded_files["test_labels"],
                    ),
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples parsed directly from the raw IDX image and label files."""

        # IDX image file: 4-byte magic number, 4-byte image count, 4-byte row count
        # and 4-byte column count (all big-endian), followed by uint8 pixel data.
        with open(filepath[0], "rb") as f:
            _ = f.read(4)  # magic number
            size = struct.unpack(">I", f.read(4))[0]
            _ = f.read(8)  # rows and columns (28 x 28)
            images = np.frombuffer(f.read(), dtype=np.uint8).reshape(size, 28, 28)

        # IDX label file: 4-byte magic number and 4-byte label count, followed by
        # one uint8 label per example.
        with open(filepath[1], "rb") as f:
            _ = f.read(8)  # magic number and label count
            labels = np.frombuffer(f.read(), dtype=np.uint8)

        for idx in range(size):
            # Cast the NumPy uint8 label to a plain int so ClassLabel stores it as a class index.
            yield idx, {"image": images[idx], "label": int(labels[idx])}
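

# A minimal usage sketch for illustration, not part of the original loader. It assumes
# this script is saved as "kmnist.py" next to the ./raw/ files; depending on the
# installed `datasets` version, load_dataset may also require trust_remote_code=True.
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("./kmnist.py")
#     example = dataset["train"][0]
#     print(example["image"].size, example["label"])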