shunk031 committed
Commit
cbbad45
1 Parent(s): 4a0a487

Initialize (#1)


* add file

* add files for tests

* add poetry files

* add .gitignore

* add settings for CI

* update test

* update test

* update test

* update scripts

* add settings for CI

* add README.md

* update README.md

* update scripts

* update README.md

.github/workflows/ci.yaml ADDED
@@ -0,0 +1,45 @@
+name: CI
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+    paths-ignore:
+      - 'README.md'
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ['3.8', '3.9', '3.10']
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          pip install -U pip setuptools wheel poetry
+          poetry install
+      - name: Format
+        run: |
+          poetry run black --check .
+      - name: Lint
+        run: |
+          poetry run flake8 . --ignore=E501,W503,E203
+      - name: Type check
+        run: |
+          poetry run mypy . \
+            --ignore-missing-imports \
+            --no-strict-optional \
+            --no-site-packages \
+            --cache-dir=/dev/null
+
+      - name: Run tests
+        run: |
+          poetry run pytest --color=yes -rf
.github/workflows/push_to_hub.yaml ADDED
@@ -0,0 +1,26 @@
+name: Sync to Hugging Face Hub
+
+on:
+  workflow_run:
+    workflows:
+      - CI
+    branches:
+      - main
+    types:
+      - completed
+
+jobs:
+  push_to_hub:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+
+      - name: Push to Huggingface hub
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_USERNAME: ${{ secrets.HF_USERNAME }}
+        run: |
+          git fetch --unshallow
+          git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/livedoor-news-corpus main
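The workflow above mirrors the repository to the Hugging Face Hub with a plain git force-push over HTTPS, authenticated by the `HF_TOKEN` and `HF_USERNAME` secrets. For comparison, a one-off upload of just the loading script could also go through the `huggingface_hub` client library; the sketch below only illustrates that alternative (with a placeholder token) and is not what the workflow runs.

```python
# A hedged sketch: uploading one file through the huggingface_hub client
# instead of force-pushing the whole git history as the workflow does.
from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # placeholder; a write-scoped token is required
api.upload_file(
    path_or_fileobj="livedoor_news_corpus.py",
    path_in_repo="livedoor_news_corpus.py",
    repo_id="shunk031/livedoor-news-corpus",  # repo id from the push URL above
    repo_type="dataset",
)
```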
.gitignore ADDED
@@ -0,0 +1,170 @@
+# Created by https://www.toptal.com/developers/gitignore/api/python
+# Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# End of https://www.toptal.com/developers/gitignore/api/python
README.md ADDED
@@ -0,0 +1,148 @@
+# Dataset Card for Livedoor News Corpus
+
+[![CI](https://github.com/shunk031/huggingface-datasets_livedoor-news-corpus/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_livedoor-news-corpus/actions/workflows/ci.yaml)
+
+## Table of Contents
+- [Table of Contents](#table-of-contents)
+- [Dataset Description](#dataset-description)
+  - [Dataset Summary](#dataset-summary)
+  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+  - [Languages](#languages)
+- [Dataset Structure](#dataset-structure)
+  - [Data Instances](#data-instances)
+  - [Data Fields](#data-fields)
+  - [Data Splits](#data-splits)
+- [Dataset Creation](#dataset-creation)
+  - [Curation Rationale](#curation-rationale)
+  - [Source Data](#source-data)
+  - [Annotations](#annotations)
+  - [Personal and Sensitive Information](#personal-and-sensitive-information)
+- [Considerations for Using the Data](#considerations-for-using-the-data)
+  - [Social Impact of Dataset](#social-impact-of-dataset)
+  - [Discussion of Biases](#discussion-of-biases)
+  - [Other Known Limitations](#other-known-limitations)
+- [Additional Information](#additional-information)
+  - [Dataset Curators](#dataset-curators)
+  - [Licensing Information](#licensing-information)
+  - [Citation Information](#citation-information)
+  - [Contributions](#contributions)
+
+## Dataset Description
+
+- **Homepage:** http://www.rondhuit.com/download.html#ldcc
+- **Repository:** https://github.com/shunk031/huggingface-datasets_livedoor-news-corpus
+
+### Dataset Summary
+
+> This corpus was created by collecting the news articles of "livedoor News", operated by NHN Japan Corporation, that are covered by the Creative Commons license noted below, and removing the HTML tags as far as possible.
+
+### Supported Tasks and Leaderboards
+
+[More Information Needed]
+
+### Languages
+
+The articles are written in Japanese.
+
+## Dataset Structure
+
+### Data Instances
+
+```python
+from datasets import load_dataset
+
+dataset = load_dataset(
+    "shunk031/livedoor-news-corpus",
+    train_ratio=0.8,
+    val_ratio=0.1,
+    test_ratio=0.1,
+    random_state=42,
+    shuffle=True,
+)
+
+print(dataset)
+# DatasetDict({
+#     train: Dataset({
+#         features: ['url', 'date', 'title', 'content', 'category'],
+#         num_rows: 5894
+#     })
+#     validation: Dataset({
+#         features: ['url', 'date', 'title', 'content', 'category'],
+#         num_rows: 737
+#     })
+#     test: Dataset({
+#         features: ['url', 'date', 'title', 'content', 'category'],
+#         num_rows: 736
+#     })
+# })
+```
+
+### Data Fields
+
+[More Information Needed]
+
+### Data Splits
+
+[More Information Needed]
+
+## Dataset Creation
+
+### Curation Rationale
+
+[More Information Needed]
+
+### Source Data
+
+#### Initial Data Collection and Normalization
+
+[More Information Needed]
+
+#### Who are the source language producers?
+
+[More Information Needed]
+
+### Annotations
+
+#### Annotation process
+
+[More Information Needed]
+
+#### Who are the annotators?
+
+[More Information Needed]
+
+### Personal and Sensitive Information
+
+[More Information Needed]
+
+## Considerations for Using the Data
+
+### Social Impact of Dataset
+
+[More Information Needed]
+
+### Discussion of Biases
+
+[More Information Needed]
+
+### Other Known Limitations
+
+[More Information Needed]
+
+## Additional Information
+
+### Dataset Curators
+
+[More Information Needed]
+
+### Licensing Information
+
+> Each article file is covered by the Creative Commons "Attribution-NoDerivs" (CC BY-ND) license. Because the required credit line differs by news category, please see the LICENSE.txt file in each subdirectory of the extracted download. "livedoor" is a registered trademark of NHN Japan Corporation.
+
+### Citation Information
+
+[More Information Needed]
+
+### Contributions
+
+Thanks to [RONDHUIT Co., Ltd.](https://www.rondhuit.com/) for creating this dataset.
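In the README example above, `category` is a `ClassLabel` feature, so each row stores an integer index rather than the category string. A minimal sketch of decoding it back to a name, assuming `dataset` was loaded exactly as in that example:

```python
# Minimal sketch, assuming `dataset` comes from the README's load_dataset call.
example = dataset["train"][0]

# ClassLabel columns hold integer indices; int2str maps them back to names.
category_feature = dataset["train"].features["category"]
print(example["category"])                            # an integer index
print(category_feature.int2str(example["category"]))  # e.g. "movie-enter"
```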
livedoor_news_corpus.py ADDED
@@ -0,0 +1,161 @@
+import logging
+import math
+import pathlib
+import random
+from typing import Dict, List, Optional, Union
+
+import datasets as ds
+
+logger = logging.getLogger(__name__)
+
+_CITATION = """\
+https://www.rondhuit.com/download.html#ldcc
+"""
+
+_DESCRIPTION = """\
+This corpus was created by collecting the news articles of "livedoor News", operated by NHN Japan Corporation, that are covered by the Creative Commons license noted below, and removing the HTML tags as far as possible.
+"""
+
+_HOMEPAGE = "https://www.rondhuit.com/download.html#ldcc"
+
+_LICENSE = """\
+Each article file is covered by the Creative Commons "Attribution-NoDerivs" (CC BY-ND) license. Because the required credit line differs by news category, see the LICENSE.txt file in each subdirectory of the extracted download. "livedoor" is a registered trademark of NHN Japan Corporation.
+"""
+
+
+_DOWNLOAD_URL = "https://www.rondhuit.com/download/ldcc-20140209.tar.gz"
+
+
+class LivedoorNewsCorpusConfig(ds.BuilderConfig):
+    def __init__(
+        self,
+        train_ratio: float = 0.8,
+        val_ratio: float = 0.1,
+        test_ratio: float = 0.1,
+        shuffle: bool = False,
+        random_state: int = 0,
+        name: str = "default",
+        version: Optional[Union[ds.utils.Version, str]] = ds.utils.Version("0.0.0"),
+        data_dir: Optional[str] = None,
+        data_files: Optional[ds.data_files.DataFilesDict] = None,
+        description: Optional[str] = None,
+    ) -> None:
+        super().__init__(
+            name=name,
+            version=version,
+            data_dir=data_dir,
+            data_files=data_files,
+            description=description,
+        )
+        # Use an approximate comparison: exact float equality can fail for
+        # ratios such as 0.7 + 0.2 + 0.1 due to binary rounding.
+        assert math.isclose(train_ratio + val_ratio + test_ratio, 1.0)
+
+        self.train_ratio = train_ratio
+        self.val_ratio = val_ratio
+        self.test_ratio = test_ratio
+
+        self.shuffle = shuffle
+        self.random_state = random_state
+
+
+class LivedoorNewsCorpusDataset(ds.GeneratorBasedBuilder):
+    VERSION = ds.Version("1.0.0")  # type: ignore
+
+    BUILDER_CONFIG_CLASS = LivedoorNewsCorpusConfig  # type: ignore
+
+    BUILDER_CONFIGS = [
+        LivedoorNewsCorpusConfig(
+            version=VERSION,  # type: ignore
+            description="Livedoor News Corpus",
+        )
+    ]
+
+    def _info(self) -> ds.DatasetInfo:
+        features = ds.Features(
+            {
+                "url": ds.Value("string"),
+                "date": ds.Value("string"),
+                "title": ds.Value("string"),
+                "content": ds.Value("string"),
+                "category": ds.ClassLabel(
+                    names=[
+                        "movie-enter",
+                        "it-life-hack",
+                        "kaden-channel",
+                        "topic-news",
+                        "livedoor-homme",
+                        "peachy",
+                        "sports-watch",
+                        "dokujo-tsushin",
+                        "smax",
+                    ]
+                ),
+            }
+        )
+        return ds.DatasetInfo(
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            features=features,
+        )
+
+    def _split_generators(self, dl_manager: ds.DownloadManager):
+        dataset_root = dl_manager.download_and_extract(_DOWNLOAD_URL)
+        dataset_root_dir = pathlib.Path(dataset_root) / "text"
+
+        # Collect all article files; each category directory also ships a
+        # LICENSE.txt, which must not become an example.
+        article_paths = list(dataset_root_dir.glob("*/**/*.txt"))
+        article_paths = list(filter(lambda p: p.name != "LICENSE.txt", article_paths))
+
+        if self.config.shuffle:  # type: ignore
+            random.seed(self.config.random_state)  # type: ignore
+            random.shuffle(article_paths)
+
+        num_articles = len(article_paths)
+        num_tng = math.ceil(num_articles * self.config.train_ratio)  # type: ignore
+        num_val = math.ceil(num_articles * self.config.val_ratio)  # type: ignore
+        num_tst = math.ceil(num_articles * self.config.test_ratio)  # type: ignore
+
+        # The ceils may overshoot by one or two in total, but slicing clamps
+        # at the end of the list, so the three splits always partition the
+        # full set of articles.
+        tng_articles = article_paths[:num_tng]
+        val_articles = article_paths[num_tng : num_tng + num_val]
+        tst_articles = article_paths[num_tng + num_val : num_tng + num_val + num_tst]
+
+        assert len(tng_articles) + len(val_articles) + len(tst_articles) == num_articles
+
+        return [
+            ds.SplitGenerator(
+                name=ds.Split.TRAIN,  # type: ignore
+                gen_kwargs={"article_paths": tng_articles},
+            ),
+            ds.SplitGenerator(
+                name=ds.Split.VALIDATION,  # type: ignore
+                gen_kwargs={"article_paths": val_articles},
+            ),
+            ds.SplitGenerator(
+                name=ds.Split.TEST,  # type: ignore
+                gen_kwargs={"article_paths": tst_articles},
+            ),
+        ]
+
+    def parse_article(self, article_data: List[str]) -> Dict[str, str]:
+        # Each article file stores the URL, date, and title on its first
+        # three lines, followed by the article body.
+        article_url = article_data[0]
+        article_date = article_data[1]
+        article_title = article_data[2]
+        article_content = " ".join(article_data[3:])
+
+        example_dict = {
+            "url": article_url,
+            "date": article_date,
+            "title": article_title,
+            "content": article_content,
+        }
+        return example_dict
+
+    def _generate_examples(self, article_paths: List[pathlib.Path]):  # type: ignore[override]
+        for i, article_path in enumerate(article_paths):
+            article_category = article_path.parent.name
+            # The corpus is distributed as UTF-8 text, so pin the encoding
+            # rather than relying on the platform default.
+            with open(article_path, "r", encoding="utf-8") as rf:
+                article_data = [line.strip() for line in rf]
+
+            example_dict = self.parse_article(article_data=article_data)
+            example_dict["category"] = article_category
+            yield i, example_dict
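`parse_article` relies on the fixed per-file layout of the corpus: the first three lines hold the article URL, date, and title, and the remaining lines form the body, joined with spaces. A small sketch with hypothetical article lines shows the mapping:

```python
# Hypothetical article lines illustrating the layout parse_article expects:
# line 1 = URL, line 2 = date, line 3 = title, remaining lines = body.
article_data = [
    "http://news.livedoor.com/article/detail/0000000/",  # placeholder URL
    "2012-01-01T00:00:00+0900",
    "An example headline",
    "First paragraph of the body.",
    "Second paragraph of the body.",
]

url, date, title = article_data[:3]
content = " ".join(article_data[3:])  # body lines are joined with spaces
print({"url": url, "date": date, "title": title, "content": content})
```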
poetry.lock ADDED
The diff for this file is too large to render.
 
pyproject.toml ADDED
@@ -0,0 +1,23 @@
+[tool.poetry]
+name = "huggingface-datasets-livedoor-news-corpus"
+version = "0.1.0"
+description = ""
+authors = ["Shunsuke KITADA <[email protected]>"]
+readme = "README.md"
+packages = []
+
+[tool.poetry.dependencies]
+python = ">=3.8.1,<4.0"
+datasets = "^2.8.0"
+
+
+[tool.poetry.group.dev.dependencies]
+black = "^22.12.0"
+isort = "^5.11.4"
+flake8 = "^6.0.0"
+mypy = "^0.991"
+pytest = "^7.2.1"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
tests/__init__.py ADDED
(empty file)
tests/livedoor_news_corpus_test.py ADDED
@@ -0,0 +1,49 @@
+import math
+
+import datasets as ds
+import pytest
+
+
+@pytest.fixture
+def dataset_path() -> str:
+    return "livedoor_news_corpus.py"
+
+
+def test_load_dataset(dataset_path: str):
+    dataset = ds.load_dataset(path=dataset_path, random_state=42, shuffle=True)
+
+    # The 20140209 archive contains 7,367 articles in total.
+    assert (
+        dataset["train"].num_rows  # type: ignore
+        + dataset["validation"].num_rows  # type: ignore
+        + dataset["test"].num_rows  # type: ignore
+        == 7367
+    )
+
+    assert len(set(dataset["train"]["category"])) == 9  # type: ignore
+
+
+@pytest.mark.parametrize(
+    argnames="tng_ratio, val_ratio, tst_ratio",
+    argvalues=(
+        (0.8, 0.1, 0.1),
+        (0.6, 0.2, 0.2),
+    ),
+)
+def test_train_valid_test_split(
+    dataset_path: str,
+    tng_ratio: float,
+    val_ratio: float,
+    tst_ratio: float,
+):
+    # Approximate comparison guards against float rounding in the ratios.
+    assert math.isclose(tng_ratio + val_ratio + tst_ratio, 1.0)
+    dataset = ds.load_dataset(
+        path=dataset_path,
+        train_ratio=tng_ratio,
+        val_ratio=val_ratio,
+        test_ratio=tst_ratio,
+    )
+
+    assert (
+        dataset["train"].num_rows  # type: ignore
+        + dataset["validation"].num_rows  # type: ignore
+        + dataset["test"].num_rows  # type: ignore
+        == 7367
+    )