# ECHR dataset loading script (echr/echr.py).
# Hub dataset-card metadata (scraped page header, preserved as a comment so the
# module remains valid Python):
#   Modalities: Tabular, Text
#   Libraries: Datasets
#   Author: Jonathan Li
#   Revision: 927a07e
import datasets
import json
import os
from datasets import Value, Sequence
# BibTeX entry for the paper that introduced the dataset
# (Chalkidis et al., "Neural Legal Judgment Prediction in English", ACL 2019).
_CITATION = """\
@inproceedings{chalkidis-etal-2019-neural,
title = "Neural Legal Judgment Prediction in {E}nglish",
author = "Chalkidis, Ilias and
Androutsopoulos, Ion and
Aletras, Nikolaos",
booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P19-1424",
doi = "10.18653/v1/P19-1424",
pages = "4317--4323",
}
"""
# Archive.org page hosting the original ECHR-ACL2019 data release.
_HOMEPAGE = "https://archive.org/details/ECHR-ACL2019"
# Short human-readable summary surfaced in the dataset card / DatasetInfo.
_DESCRIPTION = """\
The ECHR Cases dataset is designed for experimentation of neural judgment prediction, as in the original 2019 ACL paper "Neural Legal Judgment Prediction in English".
"""
# Map from ECHR article identifier to its short English title.
# Plain numeric keys ("2", "34", ...) are Convention articles; "Pn-m" keys
# presumably mean Article m of Protocol n (e.g. "P1-1" = Protocol 1, Article 1)
# — TODO confirm against the dataset documentation.
# NOTE(review): this constant is not referenced anywhere else in this file;
# it appears to be provided for downstream consumers of the loader.
ARTICLES = {
"2": "Right to life",
"3": "Prohibition of torture",
"4": "Prohibition of slavery and forced labour",
"5": "Right to liberty and security",
"6": "Right to a fair trial",
"7": "No punishment without law",
"8": "Right to respect for private and family life",
"9": "Freedom of thought, conscience and religion",
"10": "Freedom of expression",
"11": "Freedom of assembly and association",
"12": "Right to marry",
"13": "Right to an effective remedy",
"14": "Prohibition of discrimination",
"15": "Derogation in time of emergency",
"16": "Restrictions on political activity of aliens",
"17": "Prohibition of abuse of rights",
"18": "Limitation on use of restrictions on rights",
"34": "Individual applications",
"38": "Examination of the case",
"39": "Friendly settlements",
"46": "Binding force and execution of judgments",
"P1-1": "Protection of property",
"P1-2": "Right to education",
"P1-3": "Right to free elections",
"P3-1": "Right to free elections",
"P4-1": "Prohibition of imprisonment for debt",
"P4-2": "Freedom of movement",
"P4-3": "Prohibition of expulsion of nationals",
"P4-4": "Prohibition of collective expulsion of aliens",
"P6-1": "Abolition of the death penalty",
"P6-2": "Death penalty in time of war",
"P6-3": "Prohibition of derogations",
"P7-1": "Procedural safeguards relating to expulsion of aliens",
"P7-2": "Right of appeal in criminal matters",
"P7-3": "Compensation for wrongful conviction",
"P7-4": "Right not to be tried or punished twice",
"P7-5": "Equality between spouses",
"P12-1": "General prohibition of discrimination",
"P13-1": "Abolition of the death penalty",
"P13-2": "Prohibition of derogations",
"P13-3": "Prohibition of reservations",
}
class Echr(datasets.GeneratorBasedBuilder):
    """Builder for the ECHR legal-judgment cases dataset.

    Two configurations are exposed: ``non-anon`` (raw case text, under
    ``data/``) and ``anon`` (anonymised case text, under ``data_anon/``).
    Each example is one JSON object from a ``{train,test,dev}.jsonl`` shard.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="non-anon", data_dir="data"),
        datasets.BuilderConfig(name="anon", data_dir="data_anon"),
    ]

    def _info(self):
        """Describe the per-case record schema."""
        # Scalar columns and their dtypes, in card order.
        scalar_columns = {
            "itemid": "string",
            "languageisocode": "string",
            "respondent": "string",
            "branch": "string",
            "date": "int64",
            "docname": "string",
            "importance": "int64",
            "conclusion": "string",
            "judges": "string",
        }
        schema = {name: Value(dtype=dtype) for name, dtype in scalar_columns.items()}
        # Variable-length string-list columns: the case text plus the
        # (non-)violation annotations.
        for column in (
            "text",
            "violated_articles",
            "violated_paragraphs",
            "violated_bulletpoints",
            "non_violated_articles",
            "non_violated_paragraphs",
            "non_violated_bulletpoints",
        ):
            schema[column] = Sequence(feature=Value(dtype="string"))
        # Binary outcome label: was any article violated?
        schema["violated"] = Value(dtype="bool")
        return datasets.DatasetInfo(
            features=datasets.Features(schema),
            homepage=_HOMEPAGE,
            description=_DESCRIPTION,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three jsonl shards and wire them to the standard splits."""
        base = self.config.data_dir
        tags = ["train", "test", "dev"]
        downloaded = dl_manager.download([os.path.join(base, f"{tag}.jsonl") for tag in tags])
        split_names = [datasets.Split.TRAIN, datasets.Split.TEST, datasets.Split.VALIDATION]
        return [
            datasets.SplitGenerator(
                name=split_name,
                # These kwargs are forwarded to _generate_examples.
                gen_kwargs={"filepath": path, "split": tag},
            )
            for split_name, path, tag in zip(split_names, downloaded, tags)
        ]

    def _generate_examples(self, filepath, split):
        """Yield (line-index, record) pairs, one JSON object per line."""
        with open(filepath, encoding="utf-8") as handle:
            for idx, line in enumerate(handle):
                yield idx, json.loads(line)