albertvillanova committed

Commit: bdd45d2
Parent: c671344

Delete loading script

Files changed (1):
  1. dbrd.py +0 -112
dbrd.py DELETED
@@ -1,112 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Dutch Book Review Dataset"""
-
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_DESCRIPTION = """\
-The Dutch Book Review Dataset (DBRD) contains over 110k book reviews of which \
-22k have associated binary sentiment polarity labels. It is intended as a \
-benchmark for sentiment classification in Dutch and created due to a lack of \
-annotated datasets in Dutch that are suitable for this task.
-"""
-
-_CITATION = """\
-@article{DBLP:journals/corr/abs-1910-00896,
-  author    = {Benjamin van der Burgh and
-               Suzan Verberne},
-  title     = {The merits of Universal Language Model Fine-tuning for Small Datasets
-               - a case with Dutch book reviews},
-  journal   = {CoRR},
-  volume    = {abs/1910.00896},
-  year      = {2019},
-  url       = {http://arxiv.org/abs/1910.00896},
-  archivePrefix = {arXiv},
-  eprint    = {1910.00896},
-  timestamp = {Fri, 04 Oct 2019 12:28:06 +0200},
-  biburl    = {https://dblp.org/rec/journals/corr/abs-1910-00896.bib},
-  bibsource = {dblp computer science bibliography, https://dblp.org}
-}
-"""
-
-_DOWNLOAD_URL = "https://github.com/benjaminvdb/DBRD/releases/download/v3.0/DBRD_v3.tgz"
-
-
-class DBRDConfig(datasets.BuilderConfig):
-    """BuilderConfig for DBRD."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for DBRD.
-
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(DBRDConfig, self).__init__(version=datasets.Version("3.0.0", ""), **kwargs)
-
-
-class DBRD(datasets.GeneratorBasedBuilder):
-    """Dutch Book Review Dataset."""
-
-    BUILDER_CONFIGS = [
-        DBRDConfig(
-            name="plain_text",
-            description="Plain text",
-        )
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["neg", "pos"])}
-            ),
-            supervised_keys=None,
-            homepage="https://github.com/benjaminvdb/DBRD",
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="text", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        archive = dl_manager.download(_DOWNLOAD_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "train"}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "test"}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split("unsupervised"),
-                gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "unsup", "labeled": False},
-            ),
-        ]
-
-    def _generate_examples(self, files, split, labeled=True):
-        """Generate DBRD examples."""
-        # For labeled examples, extract the label from the path.
-        if labeled:
-            for path, f in files:
-                if path.startswith(f"DBRD/{split}"):
-                    label = {"pos": 1, "neg": 0}[path.split("/")[2]]
-                    yield path, {"text": f.read().decode("utf-8"), "label": label}
-        else:
-            for path, f in files:
-                if path.startswith(f"DBRD/{split}"):
-                    yield path, {"text": f.read().decode("utf-8"), "label": -1}
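With the loading script removed, the dataset should load directly from the data files hosted in the repo rather than by executing dbrd.py. A minimal usage sketch, assuming the repo id "dbrd" and that the splits and features defined in the deleted script (train/test/unsupervised, "text", "label" with neg/pos) are preserved by the converted data files:

from datasets import load_dataset

# Without dbrd.py, load_dataset resolves the dataset from the repo's data
# files (e.g. auto-converted Parquet) instead of running a loading script.
dbrd = load_dataset("dbrd")  # repo id assumed here; adjust if namespaced

print(dbrd)                   # expected splits: train, test, unsupervised
sample = dbrd["train"][0]
print(sample["text"][:100])   # Dutch book review text
print(sample["label"])        # ClassLabel: 0 = "neg", 1 = "pos"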