holylovenia committed on
Commit
9231f99
1 Parent(s): 814e7d3

Upload inset_lexicon.py with huggingface_hub

Files changed (1)
  1. inset_lexicon.py +129 -0
inset_lexicon.py ADDED
@@ -0,0 +1,129 @@
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{inproceedings,
+ author = {Koto, Fajri and Rahmaningtyas, Gemala},
+ year = {2017},
+ month = {12},
+ pages = {},
+ title = {InSet Lexicon: Evaluation of a Word List for Indonesian Sentiment Analysis in Microblogs},
+ doi = {10.1109/IALP.2017.8300625}
+ }
+ """
+
+ _LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+ _LOCAL = False
+
+ _DATASETNAME = "inset_lexicon"
+
+ _DESCRIPTION = """\
+ InSet is an Indonesian sentiment lexicon built to identify written opinion and categorize it into positive or negative opinion,
+ which can be used to analyze public sentiment towards a particular topic, event, or product. Composed from a collection of
+ words in Indonesian tweets, InSet was constructed by manually weighting each word and enhanced by adding stemming and a synonym set.
+ """
+
+ _HOMEPAGE = "https://www.researchgate.net/publication/321757985_InSet_Lexicon_Evaluation_of_a_Word_List_for_Indonesian_Sentiment_Analysis_in_Microblogs"
+ _LICENSE = "Unknown"
+ _URLS = {_DATASETNAME: "https://github.com/fajri91/InSet/archive/refs/heads/master.zip"}
+
+ _SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class InsetLexicon(datasets.GeneratorBasedBuilder):
+     """InSet is an Indonesian sentiment lexicon built to identify written opinion and categorize it into positive or negative opinion."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="inset_lexicon_source",
+             version=SOURCE_VERSION,
+             description="InSet Lexicon source schema",
+             schema="source",
+             subset_id="inset_lexicon",
+         ),
+         NusantaraConfig(
+             name="inset_lexicon_nusantara_text",
+             version=NUSANTARA_VERSION,
+             description="InSet Lexicon Nusantara schema",
+             schema="nusantara_text",
+             subset_id="inset_lexicon",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "inset_lexicon_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features({"word": datasets.Value("string"), "weight": datasets.Value("string")})
+         elif self.config.schema == "nusantara_text":
+             # Sentiment weights range from -5 (most negative) to +5 (most positive).
+             labels = [str(label) for label in range(-5, 6)]
+             features = schemas.text_features(labels)
+         else:
+             raise ValueError(f"Invalid config schema: {self.config.schema}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # The dataset has no predetermined split, so everything goes into TRAIN.
+         urls = _URLS[_DATASETNAME]
+         base_dir = Path(dl_manager.download_and_extract(urls)) / "InSet-master"
+         positive_df = pd.read_csv(base_dir / "positive.tsv", sep="\t")
+         negative_df = pd.read_csv(base_dir / "negative.tsv", sep="\t")
+         merged_df = pd.concat([positive_df, negative_df]).reset_index(drop=True)
+         merged_data_path = base_dir / "dataset.tsv"
+         # The index is written deliberately: it is read back as the "id" column in _generate_examples.
+         merged_df.to_csv(merged_data_path, sep="\t")
+
+         data_files = {"train": merged_data_path}
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_files["train"],
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         # The dataset has no id of its own; the row index written in _split_generators serves as the id.
+         df = pd.read_csv(filepath, sep="\t", encoding="ISO-8859-1")
+         df.columns = ["id", "word", "weight"]
+
+         if self.config.schema == "source":
+             for row in df.itertuples():
+                 ex = {
+                     "word": row.word,
+                     "weight": str(int(row.weight)),
+                 }
+                 yield row.id, ex
+
+         elif self.config.schema == "nusantara_text":
+             for row in df.itertuples():
+                 ex = {
+                     "id": str(row.id),
+                     "text": row.word,
+                     "label": str(int(row.weight)),
+                 }
+                 yield row.id, ex
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
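
For reference, below is a minimal usage sketch for the two configs defined above. It assumes the script is saved locally as inset_lexicon.py and that the nusacrowd package (providing nusacrowd.utils) is installed; depending on your datasets version, loading a local script may also require trust_remote_code=True or an older release that still supports dataset scripts. The path and the printed rows are illustrative, not guaranteed output.

import datasets

# Source schema: each row is {"word": str, "weight": str}.
source = datasets.load_dataset("inset_lexicon.py", name="inset_lexicon_source", split="train")

# Nusantara text schema: each row is {"id": str, "text": str, "label": str},
# where label is a stringified weight between "-5" and "5".
nusa = datasets.load_dataset("inset_lexicon.py", name="inset_lexicon_nusantara_text", split="train")

print(source[0])  # e.g. a word with its sentiment weight
print(nusa[0])    # the same row in the Nusantara text schema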