Commit 9c298e6 by holylovenia
1 Parent(s): 78ecd45

Upload indo_story_cloze.py with huggingface_hub

Files changed (1):
  indo_story_cloze.py (+179 −0)
indo_story_cloze.py ADDED
@@ -0,0 +1,179 @@
import csv
import random
import string
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """
@inproceedings{koto-etal-2022-cloze,
    title = "Cloze Evaluation for Deeper Understanding of Commonsense Stories in {I}ndonesian",
    author = "Koto, Fajri  and
      Baldwin, Timothy  and
      Lau, Jey Han",
    editor = "Bosselut, Antoine  and
      Li, Xiang  and
      Lin, Bill Yuchen  and
      Shwartz, Vered  and
      Majumder, Bodhisattwa Prasad  and
      Lal, Yash Kumar  and
      Rudinger, Rachel  and
      Ren, Xiang  and
      Tandon, Niket  and
      Zouhar, Vil{\'e}m",
    booktitle = "Proceedings of the First Workshop on Commonsense Representation and Reasoning (CSRR 2022)",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.csrr-1.2",
    doi = "10.18653/v1/2022.csrr-1.2",
    pages = "8--16",
}
"""

_DATASETNAME = "indo_story_cloze"

_DESCRIPTION = """
A Story Cloze Test framework in Indonesian. A story in our dataset consists of a four-sentence premise, a one-sentence
correct ending, and a one-sentence incorrect ending. In total, we have created 2,325 Indonesian stories with a
train/dev/test split of 1,000/200/1,135.
"""

_HOMEPAGE = "https://huggingface.co/datasets/indolem/indo_story_cloze"

_LANGUAGES = ["ind"]

_LICENSE = Licenses.CC_BY_SA_4_0.value

_LOCAL = False

_URLS = {
    _DATASETNAME: {
        "train": "https://huggingface.co/datasets/indolem/indo_story_cloze/resolve/main/train.csv",
        "dev": "https://huggingface.co/datasets/indolem/indo_story_cloze/resolve/main/dev.csv",
        "test": "https://huggingface.co/datasets/indolem/indo_story_cloze/resolve/main/test.csv",
    },
}

_SUPPORTED_TASKS = [Tasks.COMMONSENSE_REASONING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

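# Note: each source CSV exposes one story per row with the columns "Kalimat-1" through "Kalimat-4"
# (the four premise sentences), "Correct Ending", and "Incorrect Ending"; see _generate_examples below.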
class IndoStoryClozeDataset(datasets.GeneratorBasedBuilder):
    """IndoStoryCloze is a Story Cloze dataset in Indonesian."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_qa",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_qa",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "sentence-1": datasets.Value("string"),
                    "sentence-2": datasets.Value("string"),
                    "sentence-3": datasets.Value("string"),
                    "sentence-4": datasets.Value("string"),
                    "correct_ending": datasets.Value("string"),
                    "incorrect_ending": datasets.Value("string"),
                }
            )

        elif self.config.schema == "seacrowd_qa":
            features = schemas.qa_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir, "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_dir, "split": "dev"},
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        if self.config.schema == "source":
            with open(filepath[split], newline="", encoding="utf-8") as f:
                data = csv.DictReader(f)
                for i, row in enumerate(data):
                    yield i, {
                        "sentence-1": row["Kalimat-1"],
                        "sentence-2": row["Kalimat-2"],
                        "sentence-3": row["Kalimat-3"],
                        "sentence-4": row["Kalimat-4"],
                        "correct_ending": row["Correct Ending"],
                        "incorrect_ending": row["Incorrect Ending"],
                    }

        elif self.config.schema == "seacrowd_qa":

            def build_question(line):
                # Concatenate the four premise sentences; they could serve as either the question or the context.
                # Treat them as the question for now. Some sentences lack final punctuation, so append a "."
                # before concatenation.
                sentences = []
                for k in ["Kalimat-1", "Kalimat-2", "Kalimat-3", "Kalimat-4"]:
                    if line[k].strip()[-1] not in string.punctuation:
                        sentences.append(line[k] + ".")
                    else:
                        sentences.append(line[k])
                return " ".join(sentences)

            with open(filepath[split], newline="", encoding="utf-8") as f:
                data = csv.DictReader(f)
                for i, row in enumerate(data):
                    yield i, {
                        "id": str(i),
                        "question_id": str(i),
                        "document_id": str(i),
                        "question": build_question(row),
                        "type": "multiple_choice",
                        # Randomly shuffle the two endings so the correct ending does not always appear in the
                        # same position among the choices.
                        "choices": [row["Correct Ending"], row["Incorrect Ending"]] if random.randint(0, 1) == 0 else [row["Incorrect Ending"], row["Correct Ending"]],
                        "context": "",
                        "answer": [row["Correct Ending"]],
                        "meta": {},
                    }
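
For context, a minimal usage sketch of how the two configs defined above might be loaded with the Hugging Face datasets library. The script path, config names, split names, and field names are taken from the code in this commit; the snippet assumes the seacrowd dependencies imported at the top of the file are installed and that the installed datasets version still supports script-based loaders.

# Usage sketch (illustrative, not part of the uploaded file).
import datasets

# Source schema: sentence-1..sentence-4, correct_ending, incorrect_ending.
source = datasets.load_dataset("indo_story_cloze.py", name="indo_story_cloze_source")

# SEACrowd QA schema: question built from the four premise sentences, two shuffled choices.
qa = datasets.load_dataset("indo_story_cloze.py", name="indo_story_cloze_seacrowd_qa")

print(source["train"][0]["correct_ending"])
print(qa["validation"][0]["choices"])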