Commit 38b4fa5
0 Parent(s):
Update files from the datasets library (from 1.2.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0
- .gitattributes +27 -0
- README.md +174 -0
- dataset_infos.json +1 -0
- dbrd.py +125 -0
- dummy/plain_text/3.0.0/dummy_data.zip +3 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,174 @@
---
annotations_creators:
- found
language_creators:
- found
languages:
- nl
licenses:
- cc-by-nc-sa-4-0
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
source_datasets:
- original
task_categories:
- sequence-modeling
- text-classification
task_ids:
- language-modeling
- sentiment-classification
---

# Dataset Card for DBRD

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)

## Dataset Description

- **Homepage:** [Dutch Book Review Dataset (DBRD) homepage](https://benjaminvdb.github.io/DBRD)
- **Repository:** https://github.com/benjaminvdb/DBRD
- **Paper:** [The merits of Universal Language Model Fine-tuning for Small Datasets - a case with Dutch book reviews](https://arxiv.org/abs/1910.00896)
- **Leaderboard:**
- **Point of Contact:** [Benjamin van der Burgh](mailto:[email protected])

### Dataset Summary

The DBRD (pronounced *dee-bird*) dataset contains over 110k book reviews, of which 22k have associated binary sentiment polarity labels. It is intended as a benchmark for sentiment classification in Dutch and was created because of a lack of annotated Dutch datasets suitable for this task.

### Supported Tasks and Leaderboards

- `sequence-modeling`: The dataset can be used to train a model for sequence modeling, more specifically language modeling.
- `text-classification`: The dataset can be used to train a model for text classification, more specifically sentiment classification, using the provided positive/negative sentiment polarity labels. A minimal loading sketch is shown below.
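
As a minimal sketch (assuming the `datasets` library is installed and this dataset is available under the `dbrd` name), the labeled splits can be loaded like this:

```python
from datasets import load_dataset

# Load DBRD; the labeled splits are "train" and "test".
dbrd = load_dataset("dbrd")

train = dbrd["train"]
test = dbrd["test"]
print(train.num_rows, test.num_rows)  # expected: 20028 2224
print(train[0]["text"][:80], train[0]["label"])
```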

### Languages

Non-Dutch reviews were filtered out using [langdetect](https://github.com/Mimino666/langdetect), and all reviews should therefore be in Dutch (nl). They are written by reviewers on [Hebban](https://www.hebban.nl), a Dutch website for book reviews.

## Dataset Structure

### Data Instances

The dataset contains three subsets: train, test and unsupervised. The `train` and `test` sets contain labels, while the `unsupervised` set doesn't (the label value is -1 for each instance in `unsupervised`). Here's an example of a positive review, indicated with a label value of `1`.

```
{
  'label': 1,
  'text': 'Super om te lezen hoe haar leven is vergaan.\nBijzonder dat ze zo openhartig is geweest.'
}
```

### Data Fields

- `label`: either 0 (negative) or 1 (positive) in the supervised sets `train` and `test`; always -1 in the `unsupervised` set. A short sketch for decoding the label names follows this list.
- `text`: the book review as a UTF-8 encoded string.
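
As a brief illustration (a sketch, assuming the dataset has been loaded with `load_dataset` as above), the integer labels can be mapped back to their class names through the `ClassLabel` feature:

```python
from datasets import load_dataset

dbrd = load_dataset("dbrd")
label_feature = dbrd["train"].features["label"]

# ClassLabel.int2str converts the integer label (0 or 1) to "neg" or "pos".
example = dbrd["train"][0]
print(example["label"], "->", label_feature.int2str(example["label"]))
```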

### Data Splits

The `train` and `test` sets were constructed by extracting all non-neutral reviews, so that each instance can be assigned either a positive or a negative polarity label. The positive (pos) and negative (neg) labels are balanced in both the train and test sets. The remaining reviews were added to the `unsupervised` set.

|              | Train | Test | Unsupervised |
| ------------ | ----- | ---- | ------------ |
| No. of texts | 20028 | 2224 | 96264        |

## Dataset Creation

### Curation Rationale

This dataset was created because of a lack of annotated Dutch text suitable for sentiment classification. Non-Dutch texts were therefore removed, but other than that, no curation was done.

### Source Data

The book reviews were taken from [Hebban](https://www.hebban.nl), a Dutch platform for book reviews.

#### Initial Data Collection and Normalization

The source code of the scraper and the preprocessing pipeline can be found in the [DBRD GitHub repository](https://github.com/benjaminvdb/DBRD).

#### Who are the source language producers?

The reviews are written by users of [Hebban](https://www.hebban.nl) and are of varying quality. Some are short, others long, and many contain spelling mistakes and other errors.

### Annotations

Each book review is accompanied by a 1 to 5-star rating. The annotations were produced by mapping the user-provided ratings to either a positive or a negative label: 1 and 2-star ratings get the negative label `0`, and 4 and 5-star ratings get the positive label `1`. Reviews with a 3-star rating are considered neutral; they are left out of the `train`/`test` sets and added to the `unsupervised` set. A small sketch of this mapping is shown below.
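
As an illustrative sketch (not the authors' actual preprocessing code; the `rating_to_label` helper is hypothetical), the star-to-label mapping described above could look like this:

```python
from typing import Optional

def rating_to_label(stars: int) -> Optional[int]:
    """Map a 1-5 star Hebban rating to a sentiment label.

    Returns 0 (negative) for 1-2 stars, 1 (positive) for 4-5 stars,
    and None for neutral 3-star reviews, which go to the unsupervised set.
    """
    if stars in (1, 2):
        return 0
    if stars in (4, 5):
        return 1
    return None
```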

#### Annotation process

Users of [Hebban](https://www.hebban.nl) were unaware that their reviews would be used in the creation of this dataset.

#### Who are the annotators?

The annotators are the [Hebban](https://www.hebban.nl) users who wrote the book review associated with the annotation. Anyone can register on [Hebban](https://www.hebban.nl), so it is impossible to know the demographics of this group.

### Personal and Sensitive Information

The book reviews and ratings are publicly available on [Hebban](https://www.hebban.nl), and no personal or otherwise sensitive information is contained in this dataset.

## Considerations for Using the Data

### Social Impact of Dataset

While predicting the sentiment of book reviews is not that interesting in itself, the value of this dataset lies in its use for benchmarking models. The dataset contains challenges that are common to text posted online, such as spelling mistakes and other errors, and is therefore useful for validating the real-world performance of models. Such datasets are abundant for English but much harder to find for Dutch, making this a valuable resource for ML tasks in that language.

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

Reviews on [Hebban](https://www.hebban.nl) are usually written in Dutch, but some are written in English and possibly in other languages. While non-Dutch texts were filtered out as well as possible, it is hard to do this without errors; for example, reviews written in multiple languages might slip through. Also be aware that some texts contain commercial content, which sets them apart from the other reviews and may influence your models. This is unlikely to pose a major issue in most cases, but it is worth keeping in mind.

## Additional Information

### Dataset Curators

This dataset was created by [Benjamin van der Burgh](mailto:[email protected]), who was working at the [Leiden Institute of Advanced Computer Science (LIACS)](https://liacs.leidenuniv.nl/) at the time.

### Licensing Information

The dataset is licensed under a [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-nc-sa/4.0/).

### Citation Information

Please use the following citation when making use of this dataset in your work.

```
@article{DBLP:journals/corr/abs-1910-00896,
  author    = {Benjamin van der Burgh and
               Suzan Verberne},
  title     = {The merits of Universal Language Model Fine-tuning for Small Datasets
               - a case with Dutch book reviews},
  journal   = {CoRR},
  volume    = {abs/1910.00896},
  year      = {2019},
  url       = {http://arxiv.org/abs/1910.00896},
  archivePrefix = {arXiv},
  eprint    = {1910.00896},
  timestamp = {Fri, 04 Oct 2019 12:28:06 +0200},
  biburl    = {https://dblp.org/rec/journals/corr/abs-1910-00896.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
dataset_infos.json
ADDED
@@ -0,0 +1 @@
{"plain_text": {"description": "Dutch Book Review Dataset\nThe DBRD (pronounced dee-bird) dataset contains over 110k book reviews along with associated binary sentiment polarity labels and is intended as a benchmark for sentiment classification in Dutch.\n", "citation": "@article{DBLP:journals/corr/abs-1910-00896,\n author = {Benjamin van der Burgh and\n Suzan Verberne},\n title = {The merits of Universal Language Model Fine-tuning for Small Datasets\n - a case with Dutch book reviews},\n journal = {CoRR},\n volume = {abs/1910.00896},\n year = {2019},\n url = {http://arxiv.org/abs/1910.00896},\n archivePrefix = {arXiv},\n eprint = {1910.00896},\n timestamp = {Fri, 04 Oct 2019 12:28:06 +0200},\n biburl = {https://dblp.org/rec/journals/corr/abs-1910-00896.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://github.com/benjaminvdb/DBRD", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["neg", "pos"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "dbrd", "config_name": "plain_text", "version": {"version_str": "3.0.0", "description": "", "major": 3, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29496333, "num_examples": 20028, "dataset_name": "dbrd"}, "test": {"name": "test", "num_bytes": 3246243, "num_examples": 2224, "dataset_name": "dbrd"}, "unsupervised": {"name": "unsupervised", "num_bytes": 152733031, "num_examples": 96264, "dataset_name": "dbrd"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1k5UMoqoB3RT4kK9FI5Xyl7RmWWyBSwux": {"num_bytes": 79065872, "checksum": "2d7eed5a2c56b19fec22f1656722b6036569aa542d362e576bd761eb91e1e76a"}}, "download_size": 79065872, "post_processing_size": null, "dataset_size": 185475607, "size_in_bytes": 264541479}}
dbrd.py
ADDED
@@ -0,0 +1,125 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Dutch Book Review Dataset"""

from __future__ import absolute_import, division, print_function

import os

import datasets


_DESCRIPTION = """\
Dutch Book Review Dataset
The DBRD (pronounced dee-bird) dataset contains over 110k book reviews along \
with associated binary sentiment polarity labels and is intended as a \
benchmark for sentiment classification in Dutch.
"""

_CITATION = """\
@article{DBLP:journals/corr/abs-1910-00896,
  author    = {Benjamin van der Burgh and
               Suzan Verberne},
  title     = {The merits of Universal Language Model Fine-tuning for Small Datasets
               - a case with Dutch book reviews},
  journal   = {CoRR},
  volume    = {abs/1910.00896},
  year      = {2019},
  url       = {http://arxiv.org/abs/1910.00896},
  archivePrefix = {arXiv},
  eprint    = {1910.00896},
  timestamp = {Fri, 04 Oct 2019 12:28:06 +0200},
  biburl    = {https://dblp.org/rec/journals/corr/abs-1910-00896.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=1k5UMoqoB3RT4kK9FI5Xyl7RmWWyBSwux"


class DBRDConfig(datasets.BuilderConfig):
    """BuilderConfig for DBRD."""

    def __init__(self, **kwargs):
        """BuilderConfig for DBRD.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(DBRDConfig, self).__init__(version=datasets.Version("3.0.0", ""), **kwargs)


class DBRD(datasets.GeneratorBasedBuilder):
    """Dutch Book Review Dataset."""

    BUILDER_CONFIGS = [
        DBRDConfig(
            name="plain_text",
            description="Plain text",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["neg", "pos"])}
            ),
            supervised_keys=None,
            homepage="https://github.com/benjaminvdb/DBRD",
            citation=_CITATION,
        )

    def _vocab_text_gen(self, archive):
        for _, ex in self._generate_examples(archive, os.path.join("DBRD", "train")):
            yield ex["text"]

    def _split_generators(self, dl_manager):
        arch_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        data_dir = os.path.join(arch_path, "DBRD")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"directory": os.path.join(data_dir, "train")}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"directory": os.path.join(data_dir, "test")}
            ),
            datasets.SplitGenerator(
                name=datasets.Split("unsupervised"),
                gen_kwargs={"directory": os.path.join(data_dir, "unsup"), "labeled": False},
            ),
        ]

    def _generate_examples(self, directory, labeled=True):
        """Generate DBRD examples."""
        # For labeled examples, extract the label from the path.
        if labeled:
            files = {
                "pos": sorted(os.listdir(os.path.join(directory, "pos"))),
                "neg": sorted(os.listdir(os.path.join(directory, "neg"))),
            }
            for key in files:
                for id_, file in enumerate(files[key]):
                    filepath = os.path.join(directory, key, file)
                    with open(filepath, encoding="UTF-8") as f:
                        yield key + "_" + str(id_), {"text": f.read(), "label": key}
        else:
            unsup_files = sorted(os.listdir(directory))
            for id_, file in enumerate(unsup_files):
                filepath = os.path.join(directory, file)
                with open(filepath, encoding="UTF-8") as f:
                    yield id_, {"text": f.read(), "label": -1}
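
As a quick sanity check (a sketch, assuming a local copy of this repository), the builder script above can be exercised directly with the `datasets` library:

```python
from datasets import load_dataset

# Point load_dataset at the local script; this downloads and extracts the
# archive from _DOWNLOAD_URL and runs the split generators defined above.
dbrd = load_dataset("./dbrd.py", name="plain_text")
print(dbrd)
```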
dummy/plain_text/3.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b98a97db61975eced63d92129d6d506a8164d89afe55ee46f15cb5f5035843d3
size 24684