kiddothe2b committed on
Commit 63943fb
1 Parent(s): 118020b

initial commit

Files changed (1)
  1. fairlex.py +323 -0
fairlex.py ADDED
@@ -0,0 +1,323 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fairlex: A multilingual benchmark for evaluating fairness in legal text processing."""

import json
import os
import textwrap

import datasets


MAIN_CITATION = """\
@inproceedings{chalkidis-etal-2022-fairlex,
    author={Chalkidis, Ilias and Pasini, Tommaso and Zhang, Sheng and
    Tomada, Letizia and Schwemer, Sebastian Felix and Søgaard, Anders},
    title={FairLex: A Multilingual Benchmark for Evaluating Fairness in Legal Text Processing},
    booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics},
    year={2022},
    address={Dublin, Ireland}
}
"""

_DESCRIPTION = """\
Fairlex: A multilingual benchmark for evaluating fairness in legal text processing.
"""

ECTHR_ARTICLES = ["2", "3", "5", "6", "8", "9", "10", "11", "14", "P1-1"]

SCDB_ISSUE_AREAS = [
    "Criminal Procedure",
    "Civil Rights",
    "First Amendment",
    "Due Process",
    "Privacy",
    "Attorneys",
    "Unions",
    "Economic Activity",
    "Judicial Power",
    "Federalism",
    "Federal Taxation",
]

FSCS_LABELS = ["dismissal", "approval"]

CAIL_LABELS = ["0", "<=12", "<=36", "<=60", "<=120", ">120"]

class FairlexConfig(datasets.BuilderConfig):
    """BuilderConfig for Fairlex."""

    def __init__(
        self,
        label_column,
        url,
        data_url,
        citation,
        label_classes=None,
        multi_label=None,
        attributes=None,
        **kwargs,
    ):
        """BuilderConfig for Fairlex.

        Args:
            label_column: `string`, name of the column in the jsonl file corresponding
                to the label
            url: `string`, url for information about the original project / data set
            data_url: `string`, url to download the zip file from
            citation: `string`, citation for the data set
            label_classes: `list[string]`, the list of classes if the label is
                categorical. If not provided, then the label will be of type
                `datasets.Value('float32')`.
            multi_label: `boolean`, True if the task is multi-label
            attributes: `List<string>`, names of the protected attributes
            **kwargs: keyword arguments forwarded to super.
        """
        super(FairlexConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.label_column = label_column
        self.label_classes = label_classes
        self.multi_label = multi_label
        self.attributes = attributes
        self.url = url
        self.data_url = data_url
        self.citation = citation


class Fairlex(datasets.GeneratorBasedBuilder):
    """Fairlex: A multilingual benchmark for evaluating fairness in legal text processing. Version 1.0"""

    BUILDER_CONFIGS = [
        FairlexConfig(
            name="ecthr",
            description=textwrap.dedent(
                """\
            The European Court of Human Rights (ECtHR) hears allegations that a state has breached human rights
            provisions of the European Convention on Human Rights (ECHR). We use the dataset of Chalkidis et al.
            (2021), which contains 11K cases from ECtHR's public database. Each case is mapped to articles of the ECHR
            that were violated (if any). This is a multi-label text classification task. Given the facts of a case,
            the goal is to predict the ECHR articles that were violated, if any, as decided (ruled) by the court."""
            ),
            label_column="labels",
            label_classes=ECTHR_ARTICLES,
            multi_label=True,
            attributes=[
                ("applicant_age", ["n/a", "<=35", "<=65", ">65"]),
                ("applicant_gender", ["n/a", "male", "female"]),
                ("defendant_state", ["C.E. European", "Rest of Europe"]),
            ],
            data_url="https://zenodo.org/record/6322643/files/ecthr.zip",
            url="https://huggingface.co/datasets/ecthr_cases",
            citation=textwrap.dedent(
                """\
            @inproceedings{chalkidis-etal-2021-paragraph,
                title = "Paragraph-level Rationale Extraction through Regularization: A case study on {E}uropean Court of Human Rights Cases",
                author = "Chalkidis, Ilias and
                    Fergadiotis, Manos and
                    Tsarapatsanis, Dimitrios and
                    Aletras, Nikolaos and
                    Androutsopoulos, Ion and
                    Malakasiotis, Prodromos",
                booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
                month = jun,
                year = "2021",
                address = "Online",
                publisher = "Association for Computational Linguistics",
                url = "https://aclanthology.org/2021.naacl-main.22",
                doi = "10.18653/v1/2021.naacl-main.22",
                pages = "226--241",
            }"""
            ),
        ),
        FairlexConfig(
            name="scotus",
            description=textwrap.dedent(
                """\
            The US Supreme Court (SCOTUS) is the highest federal court in the United States of America and generally
            hears only the most controversial or otherwise complex cases that have not been sufficiently well resolved
            by lower courts. We combine information from SCOTUS opinions with the Supreme Court Database (SCDB)
            (Spaeth, 2020). SCDB provides metadata (e.g., date of publication, decisions, issues, decision directions,
            and many more) for all cases. We consider 11 of the 14 available thematic issue areas (e.g., Criminal
            Procedure, Civil Rights, Economic Activity). This is a single-label multi-class document classification
            task. Given the court opinion, the goal is to predict the issue area whose focus is on the subject matter
            of the controversy (dispute)."""
            ),
            label_column="label",
            label_classes=SCDB_ISSUE_AREAS,
            multi_label=False,
            attributes=[
                ("decision_direction", ["conservative", "liberal"]),
                ("respondent_type", ["other", "person", "organization", "public entity", "facility"]),
            ],
            url="http://scdb.wustl.edu/data.php",
            data_url="https://zenodo.org/record/6322643/files/scotus.zip",
            citation=textwrap.dedent(
                """\
            @misc{spaeth2020,
                author = {Harold J. Spaeth and Lee Epstein and Andrew D. Martin and Jeffrey A. Segal
                    and Theodore J. Ruger and Sara C. Benesh},
                year = {2020},
                title = {{Supreme Court Database, Version 2020 Release 01}},
                url = {http://Supremecourtdatabase.org},
                howpublished = {Washington University Law}
            }"""
            ),
        ),
        FairlexConfig(
            name="fscs",
            description=textwrap.dedent(
                """\
            The Federal Supreme Court of Switzerland (FSCS) is the last level of appeal in Switzerland and, similarly
            to SCOTUS, generally hears only the most controversial or otherwise complex cases that have not been
            sufficiently well resolved by lower courts. The court often focuses only on small parts of the previous
            decision, where it discusses possibly wrong reasoning by the lower court. The Swiss-Judgment-Prediction
            dataset (Niklaus et al., 2021) contains more than 85K decisions from the FSCS written in one of three
            languages (50K German, 31K French, 4K Italian) from the years 2000 to 2020. The dataset is not parallel,
            i.e., all cases are unique and decisions are written only in a single language. The dataset provides labels
            for a simplified binary (approval, dismissal) classification task. Given the facts of the case, the goal
            is to predict whether the plaintiff's request is valid or partially valid."""
            ),
            label_column="label",
            label_classes=FSCS_LABELS,
            multi_label=False,
            attributes=[
                ("decision_language", ["de", "fr", "it"]),
                ("legal_area", ["other", "public law", "penal law", "civil law", "social law", "insurance law"]),
                (
                    "court_region",
                    [
                        "n/a",
                        "Région lémanique",
                        "Zürich",
                        "Espace Mittelland",
                        "Northwestern Switzerland",
                        "Eastern Switzerland",
                        "Central Switzerland",
                        "Ticino",
                        "Federation",
                    ],
                ),
            ],
            url="https://github.com/JoelNiklaus/SwissCourtRulingCorpus",
            data_url="https://zenodo.org/record/6322643/files/fscs.zip",
            citation=textwrap.dedent(
                """\
            @InProceedings{niklaus-etal-2021-swiss,
                author = {Niklaus, Joel
                    and Chalkidis, Ilias
                    and Stürmer, Matthias},
                title = {Swiss-Judgment-Prediction: A Multilingual Legal Judgment Prediction Benchmark},
                booktitle = {Proceedings of the 2021 Natural Legal Language Processing Workshop},
                year = {2021},
                location = {Punta Cana, Dominican Republic},
            }"""
            ),
        ),
        FairlexConfig(
            name="cail",
            description=textwrap.dedent(
                """\
            The Supreme People's Court of China is the last level of appeal in China and considers cases that
            originated from the high people's courts concerning matters of national importance. The Chinese AI and Law
            challenge (CAIL) dataset (Xiao et al., 2018) is a Chinese legal NLP dataset for judgment prediction and
            contains over 1M criminal cases. The dataset provides labels for relevant criminal-code article prediction,
            charge (type of crime) prediction, imprisonment term (period) prediction, and monetary penalty
            prediction. The updated (soft) version of the CAIL dataset has 104K criminal court cases. The task is
            crime severity prediction, a multi-class classification task where, given the facts of a case,
            the goal is to predict how severe the committed crime was with respect to the imprisonment term.
            We approximate crime severity by the length of the imprisonment term, split into 6 clusters
            (0, <=12, <=36, <=60, <=120, >120 months)."""
            ),
            label_column="label",
            label_classes=CAIL_LABELS,
            multi_label=False,
            attributes=[
                ("defendant_gender", ["male", "female"]),
                ("court_region", ["Beijing", "Liaoning", "Hunan", "Guangdong", "Sichuan", "Guangxi", "Zhejiang"]),
            ],
            url="https://github.com/thunlp/LegalPLMs",
            data_url="https://zenodo.org/record/6322643/files/cail.zip",
            citation=textwrap.dedent(
                """\
            @article{wang-etal-2021-equality,
                title={Equality before the Law: Legal Judgment Consistency Analysis for Fairness},
                author={Yuzhong Wang and Chaojun Xiao and Shirong Ma and Haoxi Zhong and Cunchao Tu and Tianyang Zhang and Zhiyuan Liu and Maosong Sun},
                year={2021},
                journal={Science China - Information Sciences},
                url={https://arxiv.org/abs/2103.13868}
            }"""
            ),
        ),
    ]

    def _info(self):
        features = {"text": datasets.Value("string")}
        if self.config.multi_label:
            features["labels"] = datasets.features.Sequence(datasets.ClassLabel(names=self.config.label_classes))
        else:
            features["label"] = datasets.ClassLabel(names=self.config.label_classes)
        for attribute_name, attribute_groups in self.config.attributes:
            features[attribute_name] = datasets.ClassLabel(names=attribute_groups)
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + MAIN_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.jsonl"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "val.jsonl"),
                    "split": "val",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """This function returns the examples in the raw (text) form."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                example = {
                    "text": data["text"],
                    self.config.label_column: data[self.config.label_column],
                }
                for attribute_name, _ in self.config.attributes:
                    example[attribute_name] = data["attributes"][attribute_name]
                yield id_, example
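
For context, a minimal usage sketch (not part of this commit). It assumes the script above is saved locally as fairlex.py and that an installed `datasets` version still supports script-based loading; depending on the version, `trust_remote_code=True` may also be required.

from datasets import load_dataset

# Load one of the four configurations defined above: "ecthr", "scotus", "fscs" or "cail".
# The data zip is downloaded from the Zenodo URL declared in the corresponding FairlexConfig.
dataset = load_dataset("./fairlex.py", name="scotus")

# _split_generators defines train / validation / test splits.
print(dataset)

# Labels and protected attributes are ClassLabel features, stored as integers;
# the features object converts them back to their string names.
train = dataset["train"]
example = train[0]
print(train.features["label"].int2str(example["label"]))
print(train.features["decision_direction"].int2str(example["decision_direction"]))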
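
Since the benchmark targets fairness evaluation across the protected attributes, here is a sketch of how one split could be broken down per attribute group; this is an assumption about downstream use, not something the script itself does.

from collections import Counter

from datasets import load_dataset

# Assumption: local script, "fscs" configuration, validation split.
fscs = load_dataset("./fairlex.py", name="fscs", split="validation")

attribute = "decision_language"          # protected attribute with groups de / fr / it
attr_feature = fscs.features[attribute]  # ClassLabel(["de", "fr", "it"])
label_feature = fscs.features["label"]   # ClassLabel(["dismissal", "approval"])

# Label distribution per attribute group, e.g. as a starting point for
# group-wise (macro-averaged) evaluation.
per_group = {name: Counter() for name in attr_feature.names}
for example in fscs:
    group = attr_feature.int2str(example[attribute])
    per_group[group][label_feature.int2str(example["label"])] += 1

for group, counts in per_group.items():
    print(group, dict(counts))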