"""Dataset class for Individuality Of Handwriting dataset."""
import itertools
import pathlib
import zipfile

import tqdm

import datasets
from datasets.tasks import ImageClassification

_BASE_URLS = {
"train": "http://iapr-tc11.org/dataset/ICDAR_SignatureVerification/SigComp2011/sigComp2011-trainingSet.zip",
"test": "http://iapr-tc11.org/dataset/ICDAR_SignatureVerification/SigComp2011/sigComp2011-test.zip"
}
_BASE_URLS_PWD = b"I hereby accept the SigComp 2011 disclaimer."
_HOMEPAGE = "http://iapr-tc11.org/mediawiki/index.php/ICDAR_2011_Signature_Verification_Competition_(SigComp2011)"
_DESCRIPTION = """
The collection contains simultaneously acquired online and offline samples.
The collection contains offline and online signature samples.
The offline dataset comprises PNG images, scanned at 400 dpi, RGB color.
The online dataset comprises ascii files with the format: X, Y, Z (per line).
"""
_NAMES = [
"genuine",
"forgeries",
]
class ICDAR2011(datasets.GeneratorBasedBuilder):
"""ICDAR-2011 Images dataset."""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"label": datasets.ClassLabel(names=_NAMES),
"forger": datasets.Value("int32"),
"writer": datasets.Value("uint32"),
"attempt": datasets.Value("uint32"),
}
),
supervised_keys=("image", "label"),
homepage=_HOMEPAGE,
task_templates=[ImageClassification(image_column="image", label_column="label")],
)
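
    # For reference, a decoded example produced by this builder looks roughly
    # like the following (values are illustrative, not taken from the archives):
    #   {"image": <PIL.Image.Image>, "label": "genuine",
    #    "forger": -1, "writer": 3, "attempt": 1}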
def _split_generators(self, dl_manager):
train_archive_path = pathlib.Path(dl_manager.download(_BASE_URLS["train"]))
test_archive_path = pathlib.Path(dl_manager.download(_BASE_URLS["test"]))
        train_dir = train_archive_path.parent / "extracted" / train_archive_path.name
        test_dir = test_archive_path.parent / "extracted" / test_archive_path.name

        # Extract the password-protected archives; members already present on
        # disk are skipped so repeated runs do not re-extract them.
        with zipfile.ZipFile(train_archive_path, "r") as zf:
            for member in tqdm.tqdm(zf.infolist(), desc="Extracting training data"):
                try:
                    if not (train_dir / member.filename).exists():
                        zf.extract(member, train_dir, pwd=_BASE_URLS_PWD)
                except zipfile.BadZipFile:
                    print("Error extracting", member.filename)

        with zipfile.ZipFile(test_archive_path, "r") as zf:
            for member in tqdm.tqdm(zf.infolist(), desc="Extracting test data"):
                try:
                    if not (test_dir / member.filename).exists():
                        zf.extract(member, test_dir, pwd=_BASE_URLS_PWD)
                except zipfile.BadZipFile:
                    print("Error extracting", member.filename)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_dir": train_dir,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_dir": test_dir,
},
),
]
    def _generate_examples(self, data_dir):
        """Generate images and labels for splits."""
        rglob_lowercase = pathlib.Path(data_dir).rglob("*.png")
        rglob_uppercase = pathlib.Path(data_dir).rglob("*.PNG")
        rglob = itertools.chain(rglob_lowercase, rglob_uppercase)
        for index, filepath in enumerate(rglob):
            filename = filepath.with_suffix("").name
            prefix, attempt = filename.split("_")
            if len(prefix) == 7:
                # Forged signatures: the first four digits of the prefix identify
                # the forger, the remaining three the targeted writer.
                forger = int(prefix[:4])
                writer = int(prefix[4:])
                label = "forgeries"
            else:
                # Genuine signatures: the prefix is the writer id; -1 marks the
                # absence of a forger.
                writer = int(prefix)
                forger = -1
                label = "genuine"
            yield index, {
                "image": str(filepath),
                "label": label,
                "forger": forger,
                "writer": writer,
                "attempt": int(attempt),
            }
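

# Minimal usage sketch, assuming this script is saved locally as "ICDAR-2011.py"
# and run from the same directory (the exact Hub repository id may differ; the
# archives must be downloadable from the URLs above).
if __name__ == "__main__":
    dataset = datasets.load_dataset("ICDAR-2011.py", split="train")
    print(dataset)
    example = dataset[0]
    print(example["label"], example["writer"], example["attempt"])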