holylovenia committed
Commit 06faf30
1 Parent(s): 831a54b

Upload thai_tnhc2_books.py with huggingface_hub

Files changed (1)
  1. thai_tnhc2_books.py +143 -0
thai_tnhc2_books.py ADDED
@@ -0,0 +1,143 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This dataset collects all 353 books from the Thai National Historical Corpus 2 (TNHC2). The texts have been cleaned for use in pretraining models and other NLP tasks. TNHC2 is a corpus of old Thai books, all of which are out of copyright under Thai law (copyright expires 50 years after the author's death). More information on the corpus is available at https://www.arts.chula.ac.th/chulaseal/tnhc2/.
"""
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@dataset{phatthiyaphaibun_2024_10783421,
  author    = {Phatthiyaphaibun, Wannaphong},
  title     = {Thai TNHC2 Books},
  month     = mar,
  year      = 2024,
  publisher = {Zenodo},
  doi       = {10.5281/zenodo.10783421},
  url       = {https://doi.org/10.5281/zenodo.10783421}
}
"""

_DATASETNAME = "thai_tnhc2_books"

_DESCRIPTION = """\
This dataset collects all 353 books from the Thai National Historical Corpus 2 (TNHC2). The texts have been cleaned for use in pretraining models and other NLP tasks. TNHC2 is a corpus of old Thai books, all of which are out of copyright under Thai law (copyright expires 50 years after the author's death). More information on the corpus is available at https://www.arts.chula.ac.th/chulaseal/tnhc2/.
"""

_HOMEPAGE = "https://www.arts.chula.ac.th/chulaseal/tnhc2/"

_LANGUAGES = ["tha"]

_LICENSE = Licenses.CC0_1_0.value

_LOCAL = False

_URLS = "https://huggingface.co/datasets/pythainlp/thai-tnhc2-books/resolve/main/data/train-00000-of-00001.parquet?download=true"

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
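
# NOTE: the parquet file referenced by _URLS is read with pandas in
# _generate_examples and is expected to provide the four columns consumed
# there: id, book, author, and text.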

class ThaiTnhc2BooksDataset(datasets.GeneratorBasedBuilder):
    """All 353 books from the Thai National Historical Corpus 2 (TNHC2), cleaned for use in pretraining and other NLP tasks. All books are out of copyright under Thai law (50 years after the author's death). See https://www.arts.chula.ac.th/chulaseal/tnhc2/ for details."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_ssp",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_ssp",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "book": datasets.Value("string"),
                    "author": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_ssp":
            features = schemas.ssp_features
        else:
            raise ValueError(f"Unexpected schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                },
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        df = pd.read_parquet(filepath)

        # Disambiguate books that share an id by appending a per-id counter,
        # e.g. ["a", "a", "b"] becomes ["a_0", "a_1", "b_0"].
        df["id"] = df["id"] + "_" + df.groupby("id").cumcount().astype(str)

        if self.config.schema == "source":
            for i, row in df.iterrows():
                yield i, {
                    "id": row["id"],
                    "book": row["book"],
                    "author": row["author"],
                    "text": row["text"],
                }

        elif self.config.schema == "seacrowd_ssp":
            for i, row in df.iterrows():
                yield i, {
                    "id": row["id"],
                    "text": row["text"],
                }
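
For reference, a minimal usage sketch. It assumes the seacrowd package (which provides schemas and SEACrowdConfig) is installed and that this script is saved locally as thai_tnhc2_books.py; the config names come from BUILDER_CONFIGS above.

import datasets

# Source schema: one record per book with id, book, author, and text fields.
books = datasets.load_dataset(
    "thai_tnhc2_books.py",
    name="thai_tnhc2_books_source",
    split="train",
    trust_remote_code=True,  # recent datasets releases require this for script-based loaders
)

# SEACrowd ssp schema: each record reduced to the id/text pair used for pretraining.
ssp = datasets.load_dataset(
    "thai_tnhc2_books.py",
    name="thai_tnhc2_books_seacrowd_ssp",
    split="train",
    trust_remote_code=True,
)

print(books[0]["book"], "by", books[0]["author"])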