|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Dutch Book Review Dataset""" |
|
|
|
from __future__ import absolute_import, division, print_function |
|
|
|
import os |
|
|
|
import datasets |
|
|
|
|
|
# Human-readable dataset summary surfaced through DatasetInfo / the dataset card.
_DESCRIPTION = """\

Dutch Book Review Dataset

The DBRD (pronounced dee-bird) dataset contains over 110k book reviews along \

with associated binary sentiment polarity labels and is intended as a \

benchmark for sentiment classification in Dutch.

"""


# BibTeX citation for the paper introducing DBRD (van der Burgh & Verberne, 2019).
_CITATION = """\

@article{DBLP:journals/corr/abs-1910-00896,

  author    = {Benjamin van der Burgh and

               Suzan Verberne},

  title     = {The merits of Universal Language Model Fine-tuning for Small Datasets

               - a case with Dutch book reviews},

  journal   = {CoRR},

  volume    = {abs/1910.00896},

  year      = {2019},

  url       = {http://arxiv.org/abs/1910.00896},

  archivePrefix = {arXiv},

  eprint    = {1910.00896},

  timestamp = {Fri, 04 Oct 2019 12:28:06 +0200},

  biburl    = {https://dblp.org/rec/journals/corr/abs-1910-00896.bib},

  bibsource = {dblp computer science bibliography, https://dblp.org}

}

"""


# Google Drive download of the dataset archive; extracting it yields a "DBRD"
# directory with "train", "test" and "unsup" subdirectories (see _split_generators).
_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=1k5UMoqoB3RT4kK9FI5Xyl7RmWWyBSwux"
|
|
|
|
|
class DBRDConfig(datasets.BuilderConfig):
    """BuilderConfig for DBRD."""

    def __init__(self, **kwargs):
        """Create a DBRD builder configuration.

        The dataset version is pinned to 3.0.0; everything else is forwarded
        untouched to ``datasets.BuilderConfig``.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        pinned_version = datasets.Version("3.0.0", "")
        super().__init__(version=pinned_version, **kwargs)
|
|
|
|
|
class DBRD(datasets.GeneratorBasedBuilder):
    """Dutch Book Review Dataset.

    Binary sentiment classification over Dutch book reviews: labeled
    ``train``/``test`` splits (pos/neg) plus an unlabeled ``unsupervised``
    split read from the archive's ``unsup`` directory.
    """

    BUILDER_CONFIGS = [
        DBRDConfig(
            name="plain_text",
            description="Plain text",
        )
    ]

    def _info(self):
        """Return dataset metadata: features, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["neg", "pos"])}
            ),
            supervised_keys=None,
            homepage="https://github.com/benjaminvdb/DBRD",
            citation=_CITATION,
        )

    def _vocab_text_gen(self, archive):
        """Yield the raw text of every training example (e.g. for vocab building).

        Args:
            archive: path to the extracted archive root that contains the
                ``DBRD`` directory.
        """
        # BUG FIX: previously this passed ``archive`` as ``directory`` and the
        # "DBRD/train" relative path as the ``labeled`` flag — a stale call site
        # that no longer matched _generate_examples(directory, labeled=True).
        for _, ex in self._generate_examples(os.path.join(archive, "DBRD", "train")):
            yield ex["text"]

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then declare the three splits."""
        arch_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        data_dir = os.path.join(arch_path, "DBRD")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"directory": os.path.join(data_dir, "train")}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"directory": os.path.join(data_dir, "test")}
            ),
            datasets.SplitGenerator(
                name=datasets.Split("unsupervised"),
                # "unsup" has no pos/neg subdirectories, so its files are unlabeled.
                gen_kwargs={"directory": os.path.join(data_dir, "unsup"), "labeled": False},
            ),
        ]

    def _generate_examples(self, directory, labeled=True):
        """Generate DBRD examples.

        Args:
            directory: split directory. When ``labeled`` is True it must
                contain ``pos`` and ``neg`` subdirectories holding one review
                per UTF-8 text file; otherwise the review files sit directly
                in ``directory``.
            labeled: whether to read class subdirectories and emit string
                labels ("pos"/"neg") instead of the unlabeled sentinel.

        Yields:
            ``(key, example)`` pairs where ``example`` is
            ``{"text": <file contents>, "label": ...}``; the label is the
            class-folder name when labeled, else ``-1`` (the ClassLabel
            convention for "no label").
        """
        if labeled:
            # Sort directory listings so keys and example order are deterministic
            # regardless of filesystem enumeration order.
            files = {
                "pos": sorted(os.listdir(os.path.join(directory, "pos"))),
                "neg": sorted(os.listdir(os.path.join(directory, "neg"))),
            }
            for key in files:
                for id_, file in enumerate(files[key]):
                    filepath = os.path.join(directory, key, file)
                    with open(filepath, encoding="UTF-8") as f:
                        yield key + "_" + str(id_), {"text": f.read(), "label": key}
        else:
            unsup_files = sorted(os.listdir(directory))
            for id_, file in enumerate(unsup_files):
                filepath = os.path.join(directory, file)
                with open(filepath, encoding="UTF-8") as f:
                    yield id_, {"text": f.read(), "label": -1}
|
|