lautel committed on
Commit
96c5b8f
1 Parent(s): fc29f34

Upload fair-rationales.py

Browse files
Files changed (1) hide show
  1. fair-rationales.py +296 -0
fair-rationales.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Being Right for Whose Right Reasons?"""
16
+
17
+ import json
18
+ import os
19
+ import textwrap
20
+
21
+ import datasets
22
+
23
+
24
+ MAIN_CITATION = """\
25
+ @inproceedings{thorn-jakobsen-etal-2023-right,
26
+ title = {Being Right for Whose Right Reasons?},
27
+ author = {Thorn Jakobsen, Terne Sasha and
28
+ Cabello, Laura and
29
+ S{\o}gaard, Anders},
30
+ booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
31
+ year = {2023},
32
+ publisher = {Association for Computational Linguistics},
33
+ url = {https://aclanthology.org/2023.acl-long.59},
34
+ doi = {10.18653/v1/2023.acl-long.59},
35
+ pages = {1033--1054}
36
+ }
37
+ """
38
+
39
+ _DESCRIPTION = """\
40
+ Explainability methods are used to benchmark
41
+ the extent to which model predictions align
42
+ with human rationales i.e., are 'right for the
43
+ right reasons'. Previous work has failed to acknowledge, however,
44
+ that what counts as a rationale is sometimes subjective. This paper
45
+ presents what we think is a first of its kind, a
46
+ collection of human rationale annotations augmented with the annotators demographic information.
47
+ """
48
+
49
+ SST2_LABELS = ["negative", "positive", "no sentiment"]
50
+
51
+ DYNASENT_LABELS = ["negative", "positive", "no sentiment"]
52
+
53
+ MAIN_PATH = "https://huggingface.co/datasets/lautel/fr/resolve/main"
54
+
55
+
56
class FairRationalesConfig(datasets.BuilderConfig):
    """BuilderConfig for FairRationales."""

    def __init__(
        self,
        dataname,
        url,
        data_url,
        citation,
        attributes,
        label_classes=None,
        label_classes_original=None,
        **kwargs,
    ):
        """BuilderConfig for FairRationales.

        Args:
            dataname: `string`, short identifier of the source dataset
                (e.g. "sst2", "dynasent", "cose")
            url: `string`, url for information about the original data set
            data_url: `string`, url to download the zip file from
            citation: `string`, citation for the original data set
            attributes: `list[tuple]`, (name, classes) pairs describing the
                demographic/annotation attributes attached to each example;
                `classes` is either a list of class names or a ready-made
                `datasets` feature object
            label_classes: `list[string]`, the list of classes if the label is
                categorical. If not provided, then the label will be of type
                `datasets.Value('float32')`.
            label_classes_original: `list[string]`, the label classes used by
                the original (pre-re-annotation) dataset
            **kwargs: keyword arguments forwarded to super.
        """
        # Python-3-style super(); behavior identical to super(FairRationalesConfig, self).
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.dataname = dataname
        self.label_classes = label_classes
        self.label_classes_original = label_classes_original
        self.attributes = attributes
        self.data_url = data_url
        self.url = url
        self.citation = citation
101
+
102
+
103
class FairRationales(datasets.GeneratorBasedBuilder):
    """FairRationales: human rationale annotations for SST2, DynaSent and CoS-E,
    augmented with the annotators' demographic information. Version 1.0"""

    BUILDER_CONFIGS = [
        FairRationalesConfig(
            dataname="sst2",
            description=textwrap.dedent(
                """\
                The Stanford Sentiment Treebank is a corpus with fully labeled parse trees that allows for a complete analysis of the compositional effects of sentiment in language.
                Binary classification experiments on full sentences (negative or somewhat negative vs somewhat positive or positive with neutral sentences discarded) refer to the dataset as SST-2 or SST binary.
                This is a subset of the original data where annotators were allowed to re-annotate an instance as neutral or "no sentiment" and provide rationales for it.
                Therefore, this is a ternary text classification task (highly unbalanced for the 'no sentiment' class).
                Given a sentence, the goal is to predict the sentiment it conveys (negative, positive, no sentiment)."""
            ),
            label_classes=SST2_LABELS,
            label_classes_original=["negative", "positive"],
            # NOTE: class-name strings below must match the data files exactly
            # (including the "White/Causasian" spelling) — do not "fix" them.
            attributes=[
                ("recruitment_ethnicity", ["Latino/Hispanic", "White/Causasian", "Black/African American"]),
                ("recruitment_age", [">=38", "<=35"]),
                ("group_id", ["BO", "BY", "WO", "WY", "LO", "LY"]),
                ("gender", ["Male", "Female"]),
                ("english_proficiency", ["Not well", "Well", "Very well"]),
                ("attentioncheck", ["PASSED", "FAILED"]),
                # These two carry ready-made feature objects instead of class
                # lists; _info() passes them through unchanged.
                ("sst2_id", datasets.Value("int32")),
                ("sst2_split", datasets.Value("string")),
            ],
            # Plain URL join: os.path.join would emit backslashes on Windows,
            # producing an invalid download URL.
            data_url=MAIN_PATH + "/sst2.zip",
            url="https://huggingface.co/datasets/sst2",
            citation=textwrap.dedent(
                """\
                @inproceedings{socher-etal-2013-recursive,
                    title = "Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank",
                    author = "Socher, Richard and
                      Perelygin, Alex and
                      Wu, Jean and
                      Chuang, Jason and
                      Manning, Christopher D. and
                      Ng, Andrew and
                      Potts, Christopher",
                    booktitle = "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing",
                    month = oct,
                    year = "2013",
                    address = "Seattle, Washington, USA",
                    publisher = "Association for Computational Linguistics",
                    url = "https://www.aclweb.org/anthology/D13-1170",
                    pages = "1631--1642",
                }"""
            ),
        ),
        FairRationalesConfig(
            dataname="dynasent",
            description=textwrap.dedent(
                """\
                DynaSent is an English-language benchmark task for ternary (positive/negative/neutral) sentiment analysis.
                This is a subset of the original data where annotators were allowed to re-annotate an instance as neutral or "no sentiment" and provide rationales for it.
                Therefore, this is a ternary text classification task (highly unbalanced for the 'no sentiment' class).
                Given a sentence, the goal is to predict the sentiment it conveys (negative, positive, no sentiment).
                """
            ),
            label_classes=DYNASENT_LABELS,
            label_classes_original=["negative", "positive"],
            # Class-name strings must match the data files exactly.
            attributes=[
                ("recruitment_ethnicity", ["Latino/Hispanic", "White/Causasian", "Black/African American"]),
                ("recruitment_age", [">=38", "<=35"]),
                ("group_id", ["BO", "BY", "WO", "WY", "LO", "LY"]),
                ("gender", ["Male", "Female", "Why do you conflate sex and gender?"]),
                ("english_proficiency", ["Not well", "Well", "Very well"]),
                ("attentioncheck", ["PASSED", "FAILED"]),
            ],
            data_url=MAIN_PATH + "/dynasent.zip",
            url="https://huggingface.co/datasets/dynabench/dynasent",
            citation=textwrap.dedent(
                """\
                @article{potts-etal-2020-dynasent,
                    title={{DynaSent}: A Dynamic Benchmark for Sentiment Analysis},
                    author={Potts, Christopher and Wu, Zhengxuan and Geiger, Atticus and Kiela, Douwe},
                    journal={arXiv preprint arXiv:2012.15349},
                    url={https://arxiv.org/abs/2012.15349},
                    year={2020}
                }"""
            ),
        ),
        FairRationalesConfig(
            dataname="cose",
            description=textwrap.dedent(
                """\
                Common Sense Explanations (CoS-E) allows for training language models to automatically
                generate explanations that can be used during training and inference in a novel
                Commonsense Auto-Generated Explanation (CAGE) framework.
                This is a subset of the original data where annotators were allowed to re-annotate the questions and provide rationales for it.
                This is a question-answering task with 1 correct answer out of 5 options.
                Given a question, the goal is to predict the right answer.
                """
            ),
            # No `label_classes`: CoS-E answers are free-form strings (see _info()).
            label_classes_original=["A", "B", "C", "D", "E"],
            # Class-name strings must match the data files exactly
            # (including the trailing space in "Curious ").
            attributes=[
                ("recruitment_ethnicity", ["Latino/Hispanic", "White/Causasian", "Black/African American"]),
                ("recruitment_age", [">=38", "<=35"]),
                ("group_id", ["BO", "BY", "WO", "WY", "LO", "LY"]),
                ("gender", ["Male", "Female", "Curious ", "Non-binary/third gender"]),
                ("english_proficiency", ["Not well", "Well", "Very well"]),
                ("attentioncheck", ["PASSED", "FAILED"]),
            ],
            data_url=MAIN_PATH + "/cose.zip",
            url="https://huggingface.co/datasets/cos_e",
            citation=textwrap.dedent(
                """\
                @inproceedings{rajani2019explain,
                    title = "Explain Yourself! Leveraging Language models for Commonsense Reasoning",
                    author = "Rajani, Nazneen Fatema and
                      McCann, Bryan and
                      Xiong, Caiming and
                      Socher, Richard",
                    year="2019",
                    booktitle = "Proceedings of the 2019 Conference of the Association for Computational Linguistics (ACL2019)",
                    url ="https://arxiv.org/abs/1906.02361"
                }"""
            ),
        ),
    ]

    def _info(self):
        """Assemble the `DatasetInfo` (feature schema, homepage, citation)
        for the currently selected config."""
        features = {
            "QID": datasets.Value("string"),
            "text_id": datasets.Value("int64"),
            "sentence": datasets.Value("string"),
            "label_index": datasets.Value("int64"),
            "original_label": datasets.ClassLabel(names=self.config.label_classes_original),
            "rationale": datasets.Value("string"),
            "rationale_index": datasets.Value("string"),
            "rationale_binary": datasets.Value("string"),
            "age": datasets.Value("int32"),
            "ethnicity": datasets.Value("string"),
            "originaldata_id": datasets.Value("string"),
            "annotator_ID": datasets.Value("int64"),
        }
        if self.config.dataname == "cose":
            # CoS-E answers are open-ended strings, not a closed label set.
            features["label"] = datasets.Value("string")
        else:
            features["label"] = datasets.ClassLabel(names=self.config.label_classes)
        for attribute_name, attribute_groups in self.config.attributes:
            if "sst2" not in attribute_name:
                features[attribute_name] = datasets.ClassLabel(names=attribute_groups)
            else:
                # sst2_id / sst2_split already carry a feature object.
                features[attribute_name] = attribute_groups
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + MAIN_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the config's zip and expose a single TRAIN split."""
        data_dir = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs parsed from the jsonl data file.

        Args:
            filepath: path to the extracted ``train.jsonl``.
            split: split name (unused; required by the datasets API).
        """
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                example = {
                    "sentence": data["sentence"],
                    "label": data["label"],
                    "text_id": data["text_id"],
                    "QID": data["QID"],
                    "label_index": data["label_index"],
                    "original_label": data["original_label"],
                    "rationale": data["rationale"],
                    "rationale_index": data["rationale_index"],
                    "rationale_binary": data["rationale_binary"],
                    "age": data["age"],
                    "ethnicity": data["ethnicity"],
                    "gender": data["gender"],
                    "originaldata_id": data["originaldata_id"],
                    "annotator_ID": data["annotator_ID"],
                }
                # Copy every configured attribute; for the sst2 config this
                # also covers sst2_id/sst2_split, so no extra special-casing
                # is needed.
                for attribute_name, _ in self.config.attributes:
                    example[attribute_name] = data[attribute_name]
                yield id_, example