Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
README.md
CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: DBRD
 annotations_creators:
 - found
 language_creators:
dbrd.py
CHANGED
@@ -17,8 +17,6 @@
|
|
17 |
"""Dutch Book Review Dataset"""
|
18 |
|
19 |
|
20 |
-
import os
|
21 |
-
|
22 |
import datasets
|
23 |
from datasets.tasks import TextClassification
|
24 |
|
@@ -85,42 +83,30 @@ class DBRD(datasets.GeneratorBasedBuilder):
|
|
85 |
task_templates=[TextClassification(text_column="text", label_column="label")],
|
86 |
)
|
87 |
|
88 |
-
def _vocab_text_gen(self, archive):
|
89 |
-
for _, ex in self._generate_examples(archive, os.path.join("DBRD", "train")):
|
90 |
-
yield ex["text"]
|
91 |
-
|
92 |
def _split_generators(self, dl_manager):
|
93 |
-
|
94 |
-
data_dir = os.path.join(arch_path, "DBRD")
|
95 |
return [
|
96 |
datasets.SplitGenerator(
|
97 |
-
name=datasets.Split.TRAIN, gen_kwargs={"
|
98 |
),
|
99 |
datasets.SplitGenerator(
|
100 |
-
name=datasets.Split.TEST, gen_kwargs={"
|
101 |
),
|
102 |
datasets.SplitGenerator(
|
103 |
name=datasets.Split("unsupervised"),
|
104 |
-
gen_kwargs={"
|
105 |
),
|
106 |
]
|
107 |
|
108 |
-
def _generate_examples(self,
|
109 |
"""Generate DBRD examples."""
|
110 |
# For labeled examples, extract the label from the path.
|
111 |
if labeled:
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
for key in files:
|
117 |
-
for id_, file in enumerate(files[key]):
|
118 |
-
filepath = os.path.join(directory, key, file)
|
119 |
-
with open(filepath, encoding="UTF-8") as f:
|
120 |
-
yield key + "_" + str(id_), {"text": f.read(), "label": key}
|
121 |
else:
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
with open(filepath, encoding="UTF-8") as f:
|
126 |
-
yield id_, {"text": f.read(), "label": -1}
|
|
|
17 |
"""Dutch Book Review Dataset"""
|
18 |
|
19 |
|
|
|
|
|
20 |
import datasets
|
21 |
from datasets.tasks import TextClassification
|
22 |
|
|
|
83 |
task_templates=[TextClassification(text_column="text", label_column="label")],
|
84 |
)
|
85 |
|
|
|
|
|
|
|
|
|
86 |
def _split_generators(self, dl_manager):
|
87 |
+
archive = dl_manager.download(_DOWNLOAD_URL)
|
|
|
88 |
return [
|
89 |
datasets.SplitGenerator(
|
90 |
+
name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "train"}
|
91 |
),
|
92 |
datasets.SplitGenerator(
|
93 |
+
name=datasets.Split.TEST, gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "test"}
|
94 |
),
|
95 |
datasets.SplitGenerator(
|
96 |
name=datasets.Split("unsupervised"),
|
97 |
+
gen_kwargs={"files": dl_manager.iter_archive(archive), "split": "unsup", "labeled": False},
|
98 |
),
|
99 |
]
|
100 |
|
101 |
+
def _generate_examples(self, files, split, labeled=True):
|
102 |
"""Generate DBRD examples."""
|
103 |
# For labeled examples, extract the label from the path.
|
104 |
if labeled:
|
105 |
+
for path, f in files:
|
106 |
+
if path.startswith(f"DBRD/{split}"):
|
107 |
+
label = {"pos": 1, "neg": 0}[path.split("/")[2]]
|
108 |
+
yield path, {"text": f.read().decode("utf-8"), "label": label}
|
|
|
|
|
|
|
|
|
|
|
109 |
else:
|
110 |
+
for path, f in files:
|
111 |
+
if path.startswith(f"DBRD/{split}"):
|
112 |
+
yield path, {"text": f.read().decode("utf-8"), "label": -1}
|
|
|
|
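
The substantive change in dbrd.py is that the script no longer extracts the archive to disk and walks it with os.path / os.listdir; instead, _split_generators only downloads the raw archive and hands dl_manager.iter_archive(archive) to _generate_examples, which receives (path, file object) pairs and filters them by the DBRD/<split>/ path prefix. Below is a minimal sketch of that iteration pattern, imitated with the standard tarfile module; the archive filename and the DBRD/<split>/pos|neg/ layout are assumptions inferred from the path checks above, not taken from this repository.

import tarfile

def iter_archive_like(tar_path):
    # Imitates dl_manager.iter_archive: yields (member_path, file_object)
    # pairs lazily, without extracting the archive to disk.
    with tarfile.open(tar_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)

if __name__ == "__main__":
    split = "train"
    # "DBRD_v3.tgz" is a placeholder name for the downloaded archive.
    for path, f in iter_archive_like("DBRD_v3.tgz"):
        if path.startswith(f"DBRD/{split}"):
            # Same label rule as the new _generate_examples: the third path
            # component ("pos" or "neg") maps to 1 or 0.
            label = {"pos": 1, "neg": 0}[path.split("/")[2]]
            print(path, label, f.read().decode("utf-8")[:60])

Reading files lazily from the archive like this is the pattern datasets 1.16.0 uses to make such scripts usable in streaming mode as well, since no extraction step or local directory listing is required.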