holylovenia committed
Commit 57c976e
1 Parent(s): 8d5cac1

Upload indonesian_news_dataset.py with huggingface_hub
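For reference, an upload like the one in this commit message is typically done with the huggingface_hub client. A minimal sketch follows; the repo_id below is a placeholder assumption, not taken from this page:

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="indonesian_news_dataset.py",   # local script to upload
    path_in_repo="indonesian_news_dataset.py",      # destination path in the repo
    repo_id="your-org/indonesian_news_dataset",     # placeholder repo id (assumption)
    repo_type="dataset",
    commit_message="Upload indonesian_news_dataset.py with huggingface_hub",
)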

Files changed (1)
  1. indonesian_news_dataset.py +132 -0
indonesian_news_dataset.py ADDED
import pickle
from pathlib import Path
from typing import List

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@misc{andreaschandra2020,
author = {Chandra, Andreas},
title = {Indonesian News Dataset},
year = {2020},
howpublished = {Online},
url = {https://github.com/andreaschandra/indonesian-news},
note = {Accessed: 2024-02-13},
}
"""

_LANGUAGES = ["ind"]

_DATASETNAME = "indonesian_news_dataset"

_DESCRIPTION = """An imbalanced dataset for classifying Indonesian news articles.
The dataset contains 5 class labels: bola, news, bisnis, tekno, and otomotif.
It comprises around 6k train and 2.5k test examples, with the more prevalent classes
(bola and news) having roughly 10x as many train and test examples as the least prevalent class (otomotif).
"""

_HOMEPAGE = "https://github.com/andreaschandra/indonesian-news"

_LICENSE = Licenses.UNKNOWN.value

_URLS = {
    f"{_DATASETNAME}_train": "https://drive.usercontent.google.com/u/0/uc?id=1wCwPMKSyTciv8I3g9xGdUfEraA1SydG6&export=download",
    f"{_DATASETNAME}_test": "https://drive.usercontent.google.com/u/0/uc?id=1AFW_5KQFW86jlFO16S9bt564Y86WoJjV&export=download",
}

_SUPPORTED_TASKS = [Tasks.TOPIC_MODELING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

_TAGS = ["bola", "news", "bisnis", "tekno", "otomotif"]

_LOCAL = False


class IndonesianNewsDataset(datasets.GeneratorBasedBuilder):
    """Indonesian news articles labeled with 5 imbalanced classes."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
    SEACROWD_SCHEMA_NAME = "text"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features({"index": datasets.Value("string"), "news": datasets.Value("string"), "label": datasets.Value("string")})
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.text_features(_TAGS)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        train_dir = Path(dl_manager.download(_URLS[f"{_DATASETNAME}_train"]))
        test_dir = Path(dl_manager.download(_URLS[f"{_DATASETNAME}_test"]))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_dir,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str):
        """Yields examples as (key, example) tuples."""

        with open(filepath, "rb") as file:
            news_file = pickle.load(file)

        # Each downloaded pickle holds a pair: a list of news texts and a parallel list of labels.
        news_list = news_file[0]
        label_list = news_file[1]

        if self.config.schema == "source":
            for idx, (news, label) in enumerate(zip(news_list, label_list)):
                example = {"index": str(idx), "news": news, "label": label}
                yield idx, example
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            for idx, (news, label) in enumerate(zip(news_list, label_list)):
                example = {"id": str(idx), "text": news, "label": label}
                yield idx, example
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
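
As a usage sketch, the two configs defined above can be loaded with datasets.load_dataset. This assumes a local copy of this script and a datasets version that still supports script-based loading; trust_remote_code may or may not be required depending on the version:

from datasets import load_dataset

# Source schema: examples with "index", "news", "label"
ds_source = load_dataset(
    "indonesian_news_dataset.py",              # path to a local copy of this script (assumption)
    name="indonesian_news_dataset_source",
    trust_remote_code=True,
)

# SEACrowd text schema: examples with "id", "text", "label"
ds_seacrowd = load_dataset(
    "indonesian_news_dataset.py",
    name="indonesian_news_dataset_seacrowd_text",
    trust_remote_code=True,
)

print(ds_source["train"][0])
print(ds_seacrowd["test"][0])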