import json
import os

import datasets
from datasets import Sequence, Value
_CITATION = """\
@inproceedings{chalkidis-etal-2019-neural,
title = "Neural Legal Judgment Prediction in {E}nglish",
author = "Chalkidis, Ilias and
Androutsopoulos, Ion and
Aletras, Nikolaos",
booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P19-1424",
doi = "10.18653/v1/P19-1424",
pages = "4317--4323",
}
"""

_HOMEPAGE = "https://archive.org/details/ECHR-ACL2019"

_DESCRIPTION = """\
The ECHR Cases dataset is designed for experimentation with neural judgment prediction, as in the ACL 2019 paper "Neural Legal Judgment Prediction in English".
"""
ARTICLES = {
    "2": "Right to life",
    "3": "Prohibition of torture",
    "4": "Prohibition of slavery and forced labour",
    "5": "Right to liberty and security",
    "6": "Right to a fair trial",
    "7": "No punishment without law",
    "8": "Right to respect for private and family life",
    "9": "Freedom of thought, conscience and religion",
    "10": "Freedom of expression",
    "11": "Freedom of assembly and association",
    "12": "Right to marry",
    "13": "Right to an effective remedy",
    "14": "Prohibition of discrimination",
    "15": "Derogation in time of emergency",
    "16": "Restrictions on political activity of aliens",
    "17": "Prohibition of abuse of rights",
    "18": "Limitation on use of restrictions on rights",
    "34": "Individual applications",
    "38": "Examination of the case",
    "39": "Friendly settlements",
    "46": "Binding force and execution of judgments",
    "P1-1": "Protection of property",
    "P1-2": "Right to education",
    "P1-3": "Right to free elections",
    "P3-1": "Right to free elections",
    "P4-1": "Prohibition of imprisonment for debt",
    "P4-2": "Freedom of movement",
    "P4-3": "Prohibition of expulsion of nationals",
    "P4-4": "Prohibition of collective expulsion of aliens",
    "P6-1": "Abolition of the death penalty",
    "P6-2": "Death penalty in time of war",
    "P6-3": "Prohibition of derogations",
    "P7-1": "Procedural safeguards relating to expulsion of aliens",
    "P7-2": "Right of appeal in criminal matters",
    "P7-3": "Compensation for wrongful conviction",
    "P7-4": "Right not to be tried or punished twice",
    "P7-5": "Equality between spouses",
    "P12-1": "General prohibition of discrimination",
    "P13-1": "Abolition of the death penalty",
    "P13-2": "Prohibition of derogations",
    "P13-3": "Prohibition of reservations",
}


class Echr(datasets.GeneratorBasedBuilder):
    """ECHR cases dataset, in non-anonymized and anonymized variants."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="non-anon", data_dir="data"),
        datasets.BuilderConfig(name="anon", data_dir="data_anon"),
    ]

    def _info(self):
        # Each case consists of case metadata, a sequence of text paragraphs,
        # per-article (non-)violation labels, and an overall binary flag.
        features = datasets.Features(
            {
                "itemid": Value(dtype="string"),
                "languageisocode": Value(dtype="string"),
                "respondent": Value(dtype="string"),
                "branch": Value(dtype="string"),
                "date": Value(dtype="int64"),
                "docname": Value(dtype="string"),
                "importance": Value(dtype="int64"),
                "conclusion": Value(dtype="string"),
                "judges": Value(dtype="string"),
                "text": Sequence(feature=Value(dtype="string")),
                "violated_articles": Sequence(feature=Value(dtype="string")),
                "violated_paragraphs": Sequence(feature=Value(dtype="string")),
                "violated_bulletpoints": Sequence(feature=Value(dtype="string")),
                "non_violated_articles": Sequence(feature=Value(dtype="string")),
                "non_violated_paragraphs": Sequence(feature=Value(dtype="string")),
                "non_violated_bulletpoints": Sequence(feature=Value(dtype="string")),
                "violated": Value(dtype="bool"),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            homepage=_HOMEPAGE,
            description=_DESCRIPTION,
            citation=_CITATION,
        )
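
    # Illustrative shape of one record under the schema defined in _info above
    # (all values are placeholders, not taken from the corpus):
    #   {"itemid": "...", "languageisocode": "ENG", "respondent": "...",
    #    "branch": "...", "date": ..., "docname": "...", "importance": ...,
    #    "conclusion": "...", "judges": "...", "text": ["...", "..."],
    #    "violated_articles": ["6"], ..., "violated": true}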

    def _split_generators(self, dl_manager):
        # Download the three JSONL split files from the configuration's data
        # directory ("data" or "data_anon"); the download order matches the
        # indices used below.
        path_prefix = self.config.data_dir
        data_files = dl_manager.download([os.path.join(path_prefix, f"{f}.jsonl") for f in ["train", "test", "dev"]])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_files[0],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_files[1],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_files[2],
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # Each line of the JSONL file is one court case; the enumeration index
        # serves as the example key. `split` is unused here but kept to match
        # the gen_kwargs passed by _split_generators.
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, data
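
# Minimal usage sketch (an assumption, not part of the original script): with
# this file saved as "echr.py" next to the "data"/"data_anon" directories, the
# dataset can be loaded roughly as follows. Recent versions of the datasets
# library additionally require trust_remote_code=True for script-based
# datasets.
#
#   import datasets
#   ds = datasets.load_dataset("echr.py", name="non-anon")
#   print(ds["train"][0]["violated_articles"])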