"""Legal MC4"""

import json

import datasets
from huggingface_hub.file_download import hf_hub_url

try:
    import lzma as xz
except ImportError:
    import pylzma as xz

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """
Legal-MC4: A Corpus Covering the Legal Part of MC4 for European Languages
"""

_CITATION = """
"""

_REPO_ID = "joelito/legal-mc4"
_URL = f"https://huggingface.co/datasets/{_REPO_ID}"
|
_LANGUAGES = {
    "bg": 0,
    "cs": 2,
    "da": 0,
    "de": 8,
    "el": 0,
    "en": 1,
    "es": 9,
    "et": 0,
    "fi": 0,
    "fr": 2,
    "ga": 0,
    "hu": 0,
    "it": 3,
    "lt": 0,
    "lv": 0,
    "mt": 0,
    "nl": 0,
    "pl": 2,
    "pt": 1,
    "ro": 0,
    "sk": 0,
    "sl": 0,
    "sv": 0,
}
_LANGS = list(_LANGUAGES.keys())
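# The value for each language is the index of its last train shard: config "de", for
# example, reads data/de.train.0.jsonl.xz through data/de.train.8.jsonl.xz, while the
# validation split always uses a single shard 0 (see _split_generators below).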
|
|
|
|
|
class LegalMC4Config(datasets.BuilderConfig):
    """BuilderConfig for Legal-MC4."""

    def __init__(self, name: str, **kwargs):
        """BuilderConfig for Legal-MC4.

        Args:
            name: One of bg,cs,da,de,el,en,es,et,fi,fr,ga,hu,it,lt,lv,mt,nl,pl,pt,ro,sk,sl,sv or all
            **kwargs: Keyword arguments forwarded to super.
        """
        super().__init__(name=name, **kwargs)


class MC4Legal(datasets.GeneratorBasedBuilder):
    """Legal-MC4: A Corpus Covering the Legal Part of MC4 for European Languages"""

    BUILDER_CONFIGS = [LegalMC4Config(language) for language in _LANGS + ["all"]]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "url": datasets.Value("string"),
                    "timestamp": datasets.Value("timestamp[s]"),
                    "matches": datasets.Sequence(datasets.Value("string")),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        def get_url(file_name):
            return hf_hub_url(repo_id=_REPO_ID, filename=f"data/{file_name}.jsonl.xz", repo_type="dataset")

        languages = _LANGS if self.config.name == "all" else [self.config.name]
        split_generators = []
        for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION]:
            # Collect the shard URLs for this split only; the train split has
            # _LANGUAGES[language] + 1 shards, validation always has a single shard.
            data_urls = []
            for language in languages:
                shards = range(_LANGUAGES[language] + 1) if split == datasets.Split.TRAIN else [0]
                for shard in shards:
                    data_urls.append(get_url(f"{language}.{str(split)}.{shard}"))

            downloaded_files = dl_manager.download(data_urls)
            split_generators.append(
                datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": downloaded_files})
            )
        return split_generators
|
    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("Generating examples from = %s", filepath)
            try:
                with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                    for line in f:
                        if line:
                            example = json.loads(line)
                            if example is not None and isinstance(example, dict):
                                timestamp = example.get("timestamp", "")
                                # Normalize ISO timestamps by dropping a trailing "Z"
                                # (UTC designator) before casting to timestamp[s].
                                if isinstance(timestamp, str) and timestamp.endswith("Z"):
                                    timestamp = timestamp[:-1]
                                yield id_, {
                                    "url": example.get("url", ""),
                                    "timestamp": timestamp,
                                    "matches": example.get("matches", []),
                                    "text": example.get("text", ""),
                                }
                                id_ += 1
            except Exception:
                logger.exception("Error while processing file %s", filepath)
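

if __name__ == "__main__":
    # Minimal smoke test, a sketch rather than part of the loading-script API: build the
    # Bulgarian validation split directly from this file and print the first record's URL.
    # This assumes the shard files referenced above exist in the hub repo; newer versions
    # of `datasets` may additionally require trust_remote_code=True.
    dataset = datasets.load_dataset(__file__, "bg", split="validation")
    print(dataset[0]["url"])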
|