File size: 5,873 Bytes
74bdb2d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 |
import datasets
import pandas as pd
from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks
# BibTeX citation for the COPAL-ID paper (arXiv:2311.01012).
_CITATION = """\
@article{wibowo2023copal,
title={COPAL-ID: Indonesian Language Reasoning with Local Culture and Nuances},
author={Wibowo, Haryo Akbarianto and Fuadi, Erland Hilman and Nityasya, Made Nindyatama and Prasojo, Radityo Eko and Aji, Alham Fikri},
journal={arXiv preprint arXiv:2311.01012},
year={2023}
}
"""
# Canonical dataset identifier; also the prefix of every config name below.
_DATASETNAME = "copal"
_DESCRIPTION = """\
COPAL is a novel Indonesian language common sense reasoning dataset. Unlike the previous Indonesian COPA dataset (XCOPA-ID), COPAL-ID incorporates Indonesian local and cultural nuances,
providing a more natural portrayal of day-to-day causal reasoning within the Indonesian cultural sphere.
Professionally written by natives from scratch, COPAL-ID is more fluent and free from awkward phrases, unlike the translated XCOPA-ID.
Additionally, COPAL-ID is presented in both standard Indonesian and Jakartan Indonesian–a commonly used dialect.
It consists of premise, choice1, choice2, question, and label, similar to the COPA dataset.
"""
_HOMEPAGE = "https://huggingface.co/datasets/haryoaw/COPAL"
_LICENSE = Licenses.CC_BY_SA_4_0.value
# Direct download URLs for the two test CSVs: standard Indonesian ("test")
# and the colloquial Jakartan-dialect variant ("test_colloquial").
_URLS = {"test": "https://huggingface.co/datasets/haryoaw/COPAL/resolve/main/test_copal.csv?download=true", "test_colloquial": "https://huggingface.co/datasets/haryoaw/COPAL/resolve/main/test_copal_colloquial.csv?download=true"}
_SUPPORTED_TASKS = [Tasks.COMMONSENSE_REASONING]
# Data is fetched from the public URLs above, not from a local path.
_LOCAL = False
_LANGUAGES = ["ind"]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"
class COPAL(datasets.GeneratorBasedBuilder):
    """Dataset loader for COPAL-ID, an Indonesian commonsense reasoning dataset.

    Exposes a single TEST split in two variants (standard Indonesian and
    colloquial Jakartan Indonesian), each available in a "source" schema that
    mirrors the upstream CSV columns and in the SeaCrowd "seacrowd_qa" schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description="COPAL test source schema",
            schema="source",
            subset_id="copal",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_colloquial_source",
            version=SOURCE_VERSION,
            description="COPAL test colloquial source schema",
            schema="source",
            subset_id="copal",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_qa",
            version=SEACROWD_VERSION,
            description="COPAL test seacrowd schema",
            schema="seacrowd_qa",
            subset_id="copal",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_colloquial_seacrowd_qa",
            version=SEACROWD_VERSION,
            description="COPAL test colloquial seacrowd schema",
            schema="seacrowd_qa",
            subset_id="copal",
        ),
    ]

    # Same value as before ("copal_source"), now derived from _DATASETNAME for
    # consistency with the config names above.
    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self):
        """Return a DatasetInfo whose features match the selected schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "premise": datasets.Value("string"),
                    "choice1": datasets.Value("string"),
                    "choice2": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "idx": datasets.Value("int64"),
                    "label": datasets.Value("int64"),
                    "terminology": datasets.Value("int64"),
                    "culture": datasets.Value("int64"),
                    "language": datasets.Value("int64"),
                }
            )
        elif self.config.schema == "seacrowd_qa":
            # Copy the shared schema before adding "meta": assigning into
            # schemas.qa_features directly would mutate the module-level
            # object shared by every dataloader in the process.
            features = datasets.Features(schemas.qa_features)
            features["meta"] = {"terminology": datasets.Value("int64"), "culture": datasets.Value("int64"), "language": datasets.Value("int64")}
        else:
            # Previously an unknown schema fell through and raised a confusing
            # NameError on `features`; fail explicitly instead.
            raise ValueError(f"Invalid config: {self.config.name}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download both CSVs and return the single TEST split for this config."""
        data_dir = dl_manager.download_and_extract(_URLS)
        # Config names containing "colloquial" select the Jakartan-dialect CSV.
        if "colloquial" in self.config.name:
            data_url = data_dir["test_colloquial"]
        else:
            data_url = data_dir["test"]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_url},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from the downloaded CSV.

        The CSV is expected to carry premise/choice1/choice2/question/idx/label
        columns plus Terminology/Culture/Language annotation columns;
        reset_index() adds an "index" column that serves as the example key.
        """
        df = pd.read_csv(filepath, sep=",", header="infer").reset_index()
        if self.config.schema == "source":
            for row in df.itertuples():
                entry = {
                    "premise": row.premise,
                    "choice1": row.choice1,
                    "choice2": row.choice2,
                    "question": row.question,
                    "idx": row.idx,
                    "label": row.label,
                    "terminology": row.Terminology,
                    "culture": row.Culture,
                    "language": row.Language,
                }
                yield row.index, entry
        elif self.config.schema == "seacrowd_qa":
            for row in df.itertuples():
                entry = {
                    "id": row.idx,
                    "question_id": str(row.idx),
                    "document_id": str(row.idx),
                    "question": row.question,
                    "type": "multiple_choice",
                    "choices": [row.choice1, row.choice2],
                    "context": row.premise,
                    # label == 0 selects choice1, any other label selects choice2.
                    "answer": [row.choice1 if row.label == 0 else row.choice2],
                    "meta": {"terminology": row.Terminology, "culture": row.Culture, "language": row.Language},
                }
                yield row.index, entry
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
|