# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Being Right for Whose Right Reasons?"""
import json
import os
import textwrap
import datasets
MAIN_CITATION = """\
@inproceedings{thorn-jakobsen-etal-2023-right,
title = {Being Right for Whose Right Reasons?},
author = {Thorn Jakobsen, Terne Sasha and
Cabello, Laura and
S{\\o}gaard, Anders},
booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
year = {2023},
publisher = {Association for Computational Linguistics},
url = {https://aclanthology.org/2023.acl-long.59},
doi = {10.18653/v1/2023.acl-long.59},
pages = {1033--1054}
}
"""
_DESCRIPTION = """\
Explainability methods are used to benchmark
the extent to which model predictions align
with human rationales, i.e., whether models are 'right for the
right reasons'. Previous work, however, has failed to acknowledge
that what counts as a rationale is sometimes subjective. This paper
presents what we believe is a first-of-its-kind collection of human
rationale annotations augmented with the annotators' demographic information.
"""
SST2_LABELS = ["negative", "positive", "no sentiment"]
DYNASENT_LABELS = ["negative", "positive", "no sentiment"]
MAIN_PATH = "https://huggingface.co/datasets/coastalcph/fair-rationales/resolve/main"
class FairRationalesConfig(datasets.BuilderConfig):
"""BuilderConfig for FairRationales."""
def __init__(
self,
name,
url,
data_url,
attributes,
citation,
description,
label_classes=None,
label_classes_original=None,
**kwargs,
):
"""BuilderConfig for FairRationales.
Args:
label_column: `string`, name of the column in the jsonl file corresponding
to the label
url: `string`, url for the original project
data_url: `string`, url to download the zip file from
data_file: `string`, filename for data set
citation: `string`, citation for the data set
url: `string`, url for information about the data set
label_classes: `list[string]`, the list of classes if the label is
categorical. If not provided, then the label will be of type
`datasets.Value('float32')`.
attributes: `List<string>`, names of the protected attributes
**kwargs: keyword arguments forwarded to super.
"""
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
self.name = name
self.label_classes = label_classes
self.label_classes_original = label_classes_original
self.attributes = attributes
self.url = url
self.data_url = data_url
self.description = description
self.citation = citation
class FairRationales(datasets.GeneratorBasedBuilder):
"""FairRationales: A multilingual benchmark for evaluating fairness in legal text processing. Version 1.0"""
BUILDER_CONFIGS = [
FairRationalesConfig(
name="sst2",
description=textwrap.dedent(
"""\
The Stanford Sentiment Treebank is a corpus with fully labeled parse trees that allows for a complete analysis of the compositional effects of sentiment in language.
Binary classification experiments on full sentences (negative or somewhat negative vs somewhat positive or positive with neutral sentences discarded) refer to the dataset as SST-2 or SST binary.
This is a subset of the original data where annotators were allowed to re-annotate an instance as neutral or "no sentiment" and provide rationales for it.
            Therefore, this is a ternary text classification task (highly unbalanced for the 'no sentiment' class).
            Given a sentence, the goal is to predict the sentiment it conveys (positive, negative, or no sentiment)."""
),
label_classes=SST2_LABELS,
label_classes_original=["negative", "positive"],
attributes=[
("group_id", ["BO", "BY", "WO", "WY", "LO", "LY"]),
("sst2_id", datasets.Value("int32")),
("sst2_split", datasets.Value("string")),
],
            data_url=f"{MAIN_PATH}/sst2.zip",
url="https://huggingface.co/datasets/sst2",
citation=textwrap.dedent(
"""\
@inproceedings{socher-etal-2013-recursive,
title = "Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank",
author = "Socher, Richard and
Perelygin, Alex and
Wu, Jean and
Chuang, Jason and
Manning, Christopher D. and
Ng, Andrew and
Potts, Christopher",
booktitle = "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing",
month = oct,
year = "2013",
address = "Seattle, Washington, USA",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/D13-1170",
pages = "1631--1642",
            }"""
),
),
FairRationalesConfig(
name="dynasent",
description=textwrap.dedent(
"""\
DynaSent is an English-language benchmark task for ternary (positive/negative/neutral) sentiment analysis.
This is a subset of the original data where annotators were allowed to re-annotate an instance as neutral or "no sentiment" and provide rationales for it.
            Therefore, this is a ternary text classification task (highly unbalanced for the 'no sentiment' class).
            Given a sentence, the goal is to predict the sentiment it conveys (positive, negative, or no sentiment).
"""
),
label_classes=DYNASENT_LABELS,
label_classes_original=["negative", "positive"],
attributes=[
("group_id", ["BO", "BY", "WO", "WY", "LO", "LY"]),
],
            data_url=f"{MAIN_PATH}/dynasent.zip",
url="https://huggingface.co/datasets/dynabench/dynasent",
citation=textwrap.dedent(
"""\
@article{potts-etal-2020-dynasent,
title={{DynaSent}: A Dynamic Benchmark for Sentiment Analysis},
author={Potts, Christopher and Wu, Zhengxuan and Geiger, Atticus and Kiela, Douwe},
journal={arXiv preprint arXiv:2012.15349},
url={https://arxiv.org/abs/2012.15349},
year={2020}
}"""
),
),
FairRationalesConfig(
name="cose",
description=textwrap.dedent(
"""\
Common Sense Explanations (CoS-E) allows for training language models to automatically
generate explanations that can be used during training and inference in a novel
Commonsense Auto-Generated Explanation (CAGE) framework.
            This is a subset of the original data where annotators were allowed to re-annotate the questions and provide rationales for them.
            This is a question-answering task with 1 correct answer out of 5 options.
            Given a question, the goal is to predict the correct answer.
"""
),
label_classes_original=["A", "B", "C", "D", "E"],
attributes=[
("group_id", ["BO", "BY", "WO", "WY", "LO", "LY"]),
],
            data_url=f"{MAIN_PATH}/cose.zip",
url="https://huggingface.co/datasets/cos_e",
citation=textwrap.dedent(
"""\
@inproceedings{rajani2019explain,
title = "Explain Yourself! Leveraging Language models for Commonsense Reasoning",
author = "Rajani, Nazneen Fatema and
McCann, Bryan and
Xiong, Caiming and
Socher, Richard",
year="2019",
booktitle = "Proceedings of the 2019 Conference of the Association for Computational Linguistics (ACL2019)",
url ="https://arxiv.org/abs/1906.02361"
            }"""
),
),
]
def _info(self):
features = {"QID": datasets.Value("string"),
"text_id": datasets.Value("int64"),
"sentence": datasets.Value("string"),
"label_index": datasets.Value("int64"),
"original_label": datasets.ClassLabel(names=self.config.label_classes_original),
"rationale": datasets.Value("string"),
"rationale_index": datasets.Value("string"),
"rationale_binary": datasets.Value("string"),
"age": datasets.Value("int32"),
"ethnicity": datasets.Value("string"),
"originaldata_id": datasets.Value("string"),
"annotator_ID": datasets.Value("int64"),
"english_proficiency": datasets.Value("string"),
"attentioncheck": datasets.Value("string"),
"gender": datasets.Value("string"),
"recruitment_age": datasets.Value("string"),
"recruitment_ethnicity": datasets.Value("string")
}
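        # CoS-E answers are free-form option strings, so its label stays a plain
        # string feature; the sentiment configs use a fixed ClassLabel vocabulary.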
if self.config.name == "cose":
features["label"] = datasets.Value("string")
else:
features["label"] = datasets.ClassLabel(names=self.config.label_classes)
for attribute_name, attribute_groups in self.config.attributes:
if "sst2" not in attribute_name:
features[attribute_name] = datasets.ClassLabel(names=attribute_groups)
else:
features[attribute_name] = attribute_groups
return datasets.DatasetInfo(
description=self.config.description,
features=datasets.Features(features),
homepage=self.config.url,
citation=self.config.citation + "\n" + MAIN_CITATION,
)
def _split_generators(self, dl_manager):
data_dir = dl_manager.download_and_extract(self.config.data_url)
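        # All annotations for a config are released as a single train split.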
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, self.config.name, "train.jsonl"),
"split": "train"
},
),
]
def _generate_examples(self, filepath, split):
"""This function returns the examples in the raw (text) form."""
with open(filepath, encoding="utf-8") as f:
for id_, row in enumerate(f):
data = json.loads(row)
example = {
"sentence": data["sentence"],
"label": data["label"],
"text_id": data["text_id"],
"QID": data["QID"],
"label_index": data["label_index"],
"original_label": data["original_label"],
"rationale": data["rationale"],
"rationale_index": data["rationale_index"],
"rationale_binary": data["rationale_binary"],
"age": data["age"],
"recruitment_age": data["recruitment_age"],
"ethnicity": data["ethnicity"],
"recruitment_ethnicity": data["recruitment_ethnicity"],
"gender": data["gender"],
"originaldata_id": data["originaldata_id"],
"annotator_ID": data["annotator_ID"],
"english_proficiency": data["english_proficiency"],
"attentioncheck": data["attentioncheck"]
}
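                # Copy the protected-attribute columns declared for this config.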
for attribute_name, _ in self.config.attributes:
example[attribute_name] = data[attribute_name]
if self.config.name == "sst2":
example["sst2_id"] = data["sst2_id"]
example["sst2_split"] = data["sst2_split"]
yield id_, example
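

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading logic: it assumes network
    # access to the Hugging Face hub; trust_remote_code is required by recent
    # versions of the `datasets` library to run script-based datasets.
    dataset = datasets.load_dataset(
        "coastalcph/fair-rationales", "sst2", trust_remote_code=True
    )
    example = dataset["train"][0]
    print(example["sentence"], example["label"], example["group_id"])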