holylovenia committed on
Commit
891f6c8
1 Parent(s): 1719bca

Upload malaysia_ai_hansard.py with huggingface_hub

Files changed (1)
  1. malaysia_ai_hansard.py +158 -0
malaysia_ai_hansard.py ADDED
@@ -0,0 +1,158 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ The Malaysia AI Hansard Scrape dataset contains 142,766 PDFs from the Malaysian Parliament website.
+ """
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @article{malaysia_ai_hansard,
+   author = {{Malaysia-AI}},
+   title = {Crawl Malaysian Hansard},
+   year = {2023},  % Change to the relevant year if known
+   url = {https://huggingface.co/datasets/malaysia-ai/crawl-malaysian-hansard}
+ }
+ """
+
+ _DATASETNAME = "malaysia_ai_hansard"
+
+ _DESCRIPTION = """\
+ The Malaysia AI Hansard Scrape dataset contains 142,766 PDFs from the Malaysian Parliament website
+ (https://www.parlimen.gov.my/hansard-dewan-rakyat.html?uweb=dr).
+ Each document comes with a JSON record containing the raw text ("original"), a cleaned version ("cleaned"),
+ the page numbers ("no_page" and "actual_no_page"), the document's "date", and the "url" of the original PDF.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/malaysia-ai/crawl-malaysian-hansard"
+
+ _LANGUAGES = ["zlm"]  # ISO 639-3 code for Malay (individual language)
+
+ _LICENSE = Licenses.APACHE_2_0.value
+
+ _LOCAL = False
+
+
+ _URLS = {
+     _DATASETNAME: "https://huggingface.co/datasets/malaysia-ai/crawl-malaysian-hansard",
+ }
+
+
+ _SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class MalaysiaAIHansardDataset(datasets.GeneratorBasedBuilder):
+     """Malaysia AI Hansard Scrape dataset contains 142,766 PDFs from the Malaysian Parliament website."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name="malaysia_ai_hansard_source",
+             version=SOURCE_VERSION,
+             description="malaysia_ai_hansard source schema",
+             schema="source",
+             subset_id="malaysia_ai_hansard",
+         ),
+         SEACrowdConfig(
+             name="malaysia_ai_hansard_seacrowd_ssp",
+             version=SEACROWD_VERSION,
+             description="malaysia_ai_hansard SEACrowd schema",
+             schema="seacrowd_ssp",
+             subset_id="malaysia_ai_hansard",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "malaysia_ai_hansard_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         # Create the source schema; this schema will keep all keys/information/labels as close to the original dataset as possible.
+
+         # You can arbitrarily nest lists and dictionaries.
+         # For iterables, use lists over tuples or `datasets.Sequence`
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "original": datasets.Value("string"),
+                     "cleaned": datasets.Value("string"),
+                     "no_page": datasets.Value("string"),
+                     "actual_no_page": datasets.Value("string"),
+                     "date": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_ssp":
+             features = schemas.ssp_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         urls = _URLS[_DATASETNAME]
+         # data_dir = dl_manager.download_and_extract(urls)
+         # dl_manager not used since dataloader uses HF 'load_dataset'
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": urls,
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         # The last two URL segments form the HF repo id, i.e. "malaysia-ai/crawl-malaysian-hansard".
+         data = datasets.load_dataset("/".join(filepath.split("/")[-2:]), split="train")
+
+         for key, sample in enumerate(data):
+             if self.config.schema == "source":
+                 yield key, {
+                     "original": sample["original"],
+                     "cleaned": sample["cleaned"],
+                     "no_page": sample["no_page"],
+                     "actual_no_page": sample["actual_no_page"],
+                     "date": sample["date"],
+                     "url": sample["url"],
+                 }
+
+             elif self.config.schema == "seacrowd_ssp":
+                 yield key, {
+                     "id": key,
+                     "text": sample["cleaned"],
+                 }
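
For quick reference, here is a minimal usage sketch of the two configs defined above. It assumes this script is saved locally as malaysia_ai_hansard.py and that the seacrowd utilities it imports are installed; the trust_remote_code flag may be required on newer datasets releases, and the record indexing shown is only illustrative.

import datasets

# Source schema: keeps the original fields (original, cleaned, no_page, actual_no_page, date, url).
source = datasets.load_dataset("malaysia_ai_hansard.py", name="malaysia_ai_hansard_source", split="train", trust_remote_code=True)

# SEACrowd ssp schema: each record is reduced to {"id", "text"}, with "text" taken from the cleaned transcript.
ssp = datasets.load_dataset("malaysia_ai_hansard.py", name="malaysia_ai_hansard_seacrowd_ssp", split="train", trust_remote_code=True)

print(source[0]["url"])       # URL of the original Hansard PDF
print(ssp[0]["text"][:200])   # first 200 characters of the cleaned text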