Tasks: Text Classification
Modalities: Text
Formats: parquet
Languages: Catalan
Size: 10K - 100K
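The dataset is distributed with the following Hugging Face datasets loading script: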
# Loading script for the PAWS-ca dataset

import json

import datasets


_CITATION = """
"""

_DESCRIPTION = """
The PAWS-ca dataset (Paraphrase Adversaries from Word Scrambling in Catalan) is a translation of the English PAWS dataset into Catalan, commissioned by the BSC LangTech Unit.
This dataset contains 4,000 human-translated PAWS pairs and 49,000 machine-translated pairs.
"""
_HOMEPAGE = "https://zenodo.org/record/"

_URL = "https://huggingface.co/datasets/projecte-aina/paws-ca/resolve/main/"

# File names of the three splits hosted in the dataset repository.
_TRAIN_FILE = "train.json"
_DEV_FILE = "dev_2k.json"
_TEST_FILE = "test_2k.json"


class PAWSXConfig(datasets.BuilderConfig):
    """BuilderConfig for PAWS-ca."""

    def __init__(self, **kwargs):
        """Constructs a PAWSXConfig.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.1.0", ""), **kwargs)


class PAWSX(datasets.GeneratorBasedBuilder):
    """PAWS-ca, a Catalan version of PAWS."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        PAWSXConfig(
            name="paws-ca",
            description="PAWS-ca dataset",
        ),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("int32"),
                "sentence1": datasets.Value("string"),
                "sentence2": datasets.Value("string"),
                # PAWS labels: 0 = not a paraphrase, 1 = paraphrase.
                "label": datasets.features.ClassLabel(names=["0", "1"]),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAIN_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yields (key, example) tuples from a JSON split file."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for i, row in enumerate(data):
                yield i, {
                    "id": row["id"],
                    "sentence1": row["sentence1"],
                    "sentence2": row["sentence2"],
                    "label": row["label"],
                }