holylovenia committed on
Commit
6b5c5a0
1 Parent(s): 7587a27

Upload senti_bahasa_rojak.py with huggingface_hub

Files changed (1)
  1. senti_bahasa_rojak.py +168 -0
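
For context, uploads like this one are typically made with the `huggingface_hub` client. A minimal sketch, assuming an authenticated environment; the repo id below is a hypothetical placeholder:

    from huggingface_hub import HfApi

    api = HfApi()  # assumes credentials from a prior `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="senti_bahasa_rojak.py",  # local file to push
        path_in_repo="senti_bahasa_rojak.py",     # destination path in the repo
        repo_id="SEACrowd/senti_bahasa_rojak",    # hypothetical repo id
        repo_type="dataset",
        commit_message="Upload senti_bahasa_rojak.py with huggingface_hub",
    )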
senti_bahasa_rojak.py ADDED
@@ -0,0 +1,168 @@
+ import csv
+ import os
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{romadhona2022brcc,
+     title={BRCC and SentiBahasaRojak: The First Bahasa Rojak Corpus for Pretraining and Sentiment Analysis Dataset},
+     author={Romadhona, Nanda Putri and Lu, Sin-En and Lu, Bo-Han and Tsai, Richard Tzong-Han},
+     booktitle={Proceedings of the 29th International Conference on Computational Linguistics},
+     pages={4418--4428},
+     year={2022},
+     organization={International Committee on Computational Linguistics},
+     address={Taiwan},
+     email={[email protected], {alznn, lu110522028, thtsai}@g.ncu.edu.tw}
+ }
+ """
+
+ _DATASETNAME = "senti_bahasa_rojak"
+
+ _DESCRIPTION = """\
+ This dataset contains reviews for products, movies, and stocks in the Bahasa Rojak dialect,
+ a popular dialect in Malaysia that consists of English, Malay, and Chinese.
+ Each review is manually annotated as positive (bullish for stocks) or negative (bearish for stocks).
+ Reviews are generated through data augmentation using English and Malay sentiment analysis datasets.
+ """
+
+ _HOMEPAGE = "https://data.depositar.io/dataset/brcc_and_sentibahasarojak/resource/8a558f64-98ff-4922-a751-0ce2ce8447bd"
+
+ _LANGUAGES = ["zlm", "eng", "cmn"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+
+ _LICENSE = Licenses.UNKNOWN.value
+
+ _LOCAL = False
+
+ _URLS = {
+     _DATASETNAME: "https://data.depositar.io/dataset/304d1572-27d6-4549-8292-b1c8f5e9c086/resource/8a558f64-98ff-4922-a751-0ce2ce8447bd/download/BahasaRojak_Datasets.zip",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class SentiBahasaRojakDataset(datasets.GeneratorBasedBuilder):
+     """The BRCC (Bahasa Rojak Crawled Corpus) is a novel dataset designed for the study of Bahasa Rojak,
+     a code-mixed dialect combining English, Malay, and Chinese, prevalent in Malaysia.
+     This corpus is intended for pre-training language models and sentiment analysis,
+     addressing the unique challenges of processing code-mixed languages."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     subsets = ["movie", "product", "stock"]
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}.{sub}_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description=f"{_DATASETNAME}.{sub} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}.{sub}",
+         )
+         for sub in subsets
+     ] + [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}.{sub}_seacrowd_text",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME}.{sub} SEACrowd schema",
+             schema="seacrowd_text",
+             subset_id=f"{_DATASETNAME}.{sub}",
+         )
+         for sub in subsets
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}.movie_source"
+     LABELS = ["positive", "negative"]
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=self.LABELS),
+                 }
+             )
+         elif self.config.schema == "seacrowd_text":
+             features = schemas.text_features(self.LABELS)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+         data_dir = os.path.join(data_dir, "BahasaRojak Datasets", "SentiBahasaRojak")
+
+         subset = self.config.name.split(".")[-1].split("_")[0]
+         subset_dir = os.path.join(data_dir, f"SentiBahasaRojak-{subset.capitalize()}")
+         filepath = {}
+         if subset == "stock":
+             # The stock subset ships as labeled TSV files, one per split.
+             for split in ["train", "valid", "test"]:
+                 filepath[split] = os.path.join(subset_dir, f"{split}_labeled.tsv")
+         else:
+             # The movie and product subsets ship as parallel text/label files
+             # (mix.<split> plus mix.<split>.label).
+             for split in ["train", "valid", "test"]:
+                 filepath[split] = os.path.join(subset_dir, f"mix.{split}")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": filepath["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": filepath["test"],
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": filepath["valid"],
+                     "split": "valid",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: str, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         if filepath.endswith(".tsv"):
+             # Stock subset: tab-separated (text, label) rows. csv.reader yields
+             # strings, so the label must be compared against "1", not the int 1.
+             with open(filepath, encoding="utf-8") as file:
+                 reader = csv.reader(file, delimiter="\t", quoting=csv.QUOTE_NONE)
+                 for row_idx, row in enumerate(reader):
+                     label = "positive" if row[1] == "1" else "negative"
+                     if self.config.schema == "source":
+                         yield row_idx, {
+                             "text": row[0],
+                             "label": label,
+                         }
+                     elif self.config.schema == "seacrowd_text":
+                         yield row_idx, {
+                             "id": str(row_idx),  # the seacrowd_text schema declares "id" as a string
+                             "text": row[0],
+                             "label": label,
+                         }
+         else:
+             # Movie and product subsets: one review per line, with labels in a
+             # parallel <filepath>.label file.
+             labelpath = filepath + ".label"
+             with open(filepath, encoding="utf-8") as file, open(labelpath, encoding="utf-8") as label_file:
+                 for row_idx, (text, label) in enumerate(zip(file, label_file)):
+                     if self.config.schema == "source":
+                         yield row_idx, {
+                             "text": text.strip(),
+                             "label": label.strip(),
+                         }
+                     elif self.config.schema == "seacrowd_text":
+                         yield row_idx, {
+                             "id": str(row_idx),
+                             "text": text.strip(),
+                             "label": label.strip(),
+                         }
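
Once the script above is saved locally as senti_bahasa_rojak.py, loading a configuration is a one-liner. A minimal usage sketch, assuming the `seacrowd` package is installed; config names follow the BUILDER_CONFIGS pattern above:

    import datasets

    # Load the movie subset in the source schema; the other configs follow the
    # same pattern, e.g. "senti_bahasa_rojak.stock_seacrowd_text".
    dataset = datasets.load_dataset(
        "senti_bahasa_rojak.py",
        name="senti_bahasa_rojak.movie_source",
        trust_remote_code=True,  # newer `datasets` versions require this for script-based loaders
    )
    print(dataset["train"][0])  # e.g. {"text": "...", "label": 0}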