import csv
import json
import lzma
import os

import datasets

try:
    import lzma as xz
except ImportError:
    import pylzma as xz


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2020}
}
"""

# You can copy an official description
_DESCRIPTION = """\
This dataset contains court decisions for the law area prediction task.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method).
_URLS = {
    "main": "https://huggingface.co/datasets/rcds/law_area_prediction/resolve/main/data/sub_areas/huggingface"
}


def get_url(config_name):
    """Return the base URL of the data files for the given configuration."""
    if config_name == "main":
        return _URLS["main"]


class LawAreaPrediction(datasets.GeneratorBasedBuilder):
    """This dataset contains court decisions for the law area prediction task."""

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the dataset with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="main", version=VERSION, description="Whole dataset"),
    ]

    DEFAULT_CONFIG_NAME = "main"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

    def _info(self):
        if self.config.name == "main":  # This is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    # These are the features of your dataset like images, labels ...
                    "decision_id": datasets.Value("string"),
                    "facts": datasets.Value("string"),
                    "considerations": datasets.Value("string"),
                    "law_area": datasets.Value("string"),
                    "law_sub_area": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "year": datasets.Value("int32"),
                    "court": datasets.Value("string"),
                    "chamber": datasets.Value("string"),
                    "canton": datasets.Value("string"),
                    "region": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,  # Here we define them above because they can differ between configurations
            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below
            # and specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"), # Homepage of the dataset for documentation # homepage=_HOMEPAGE, # License for the dataset if available # license=_LICENSE, # Citation for the dataset # citation=_CITATION, ) def _split_generators(self, dl_manager): # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files. # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive urls = get_url(self.config.name) filepath_train = dl_manager.download(os.path.join(urls, "train.jsonl.xz")) filepath_validation = dl_manager.download(os.path.join(urls, "validation.jsonl.xz")) filepath_test = dl_manager.download(os.path.join(urls, "test.jsonl.xz")) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": filepath_train, "split": "train", }, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": filepath_validation, "split": "validation", }, ), datasets.SplitGenerator( name=datasets.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": filepath_test, "split": "test" }, ) ] def belongs_to_law_area(self, law_sub_area): area_map = { "public": ['Tax', 'Urban Planning and Environmental', 'Expropriation', 'Public Administration', 'Other Fiscal'], "civil": ['Rental and Lease', 'Employment Contract', 'Bankruptcy', 'Family', 'Competition and Antitrust', 'Intellectual Property'], 'criminal': ['Substantive Criminal', 'Criminal Procedure'] } if law_sub_area in area_map[self.config.name]: return True # raise Error if law_sub_area not found in any area for area in area_map: if law_sub_area in area_map[area]: return False raise ValueError("law_sub_area not found in any area") # method parameters are unpacked from `gen_kwargs` as given in `_split_generators` def _generate_examples(self, filepath, split): # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example. line_counter = 0 try: with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f: for id, line in enumerate(f): line_counter += 1 if line: data = json.loads(line) if self.config.name == "main": yield id, { "decision_id": data["decision_id"], "facts": data["facts"], "considerations": data["considerations"], "law_area": data["law_area"], "law_sub_area": data["label"], "language": data["language"], "year": data["year"], "court": data["court"], "chamber": data["chamber"], "canton": data["canton"], "region": data["region"] } except lzma.LZMAError as e: print(split, e) if line_counter == 0: raise e