"""Jomleh: the Farsi sentence dataset."""

import io
import json
from dataclasses import dataclass

import datasets
import zstandard

logger = datasets.logging.get_logger(__name__)


@dataclass
class Identification:
    label: str
    prob: float
|
_DESCRIPTION = """\
Jomleh is a Farsi (Persian) monolingual dataset composed of one sentence per \
sample. It focuses on quality over quantity and is curated primarily from the \
OSCAR project (https://oscar-project.com), among other data sources.\
"""
|
|
|
_URL = "https://mlengineer.ai"

_LICENSE = """\
These data are released under the following licensing scheme:
We do not own any of the text from which these data have been extracted.
We license the actual packaging of these data under the Creative Commons CC0 license \
("no rights reserved") http://creativecommons.org/publicdomain/zero/1.0/
To the extent possible under law, Inria has waived all copyright \
and related or neighboring rights to OSCAR.
This work is published from: France.
Should you consider that our data contains material that is owned by you \
and should therefore not be reproduced here, please:
* Clearly identify yourself, with detailed contact data such as an address, \
telephone number or email address at which you can be contacted.
* Clearly identify the copyrighted work claimed to be infringed.
* Clearly identify the material that is claimed to be infringing and \
information reasonably sufficient to allow us to locate the material.
We will comply with legitimate requests by removing the affected sources \
from the next release of the corpus.\
"""
|
|
|
_CITATION = """\
"""

_BASE_DATA_PATH = "files/"
_BASE_CHECKSUM_FILE_NAME = "checksum.sha256"
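# The checksum file is assumed to follow the sha256sum output format, one
# "<hash>  <filename>" pair per line; `_split_generators` reads the second
# whitespace-separated field of each line, e.g. (both values hypothetical):
#
#   9f86d081884c7d65...  jomleh_000.jsonl.zst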
|
|
|
class JomlehConfig(datasets.BuilderConfig):
    """BuilderConfig for the Jomleh corpus."""

    def __init__(self, **kwargs):
        """BuilderConfig for Jomleh.

        Args:
            **kwargs: Keyword arguments forwarded to super.
        """
        description = "Jomleh dataset from April 2023"
        super(JomlehConfig, self).__init__(
            name="Jomleh", description=description, **kwargs
        )
        self.base_data_path = _BASE_DATA_PATH


class Jomleh(datasets.GeneratorBasedBuilder):
    """Jomleh: the Farsi text dataset based on the OSCAR project."""

    BUILDER_CONFIGS = [
        JomlehConfig(
            version=datasets.Version("2023.4.0"),
        )
    ]
    BUILDER_CONFIG_CLASS = JomlehConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "text": datasets.Value("string"),
                    "source": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # Download the checksum file and collect the data file names from
        # the second field of each non-empty line.
        checksum_path = self.config.base_data_path + _BASE_CHECKSUM_FILE_NAME
        checksum_file = dl_manager.download(checksum_path)
        with open(checksum_file, encoding="utf-8") as f:
            data_filenames = [line.split()[1] for line in f if line.strip()]
        data_urls = [
            self.config.base_data_path + data_filename
            for data_filename in data_filenames
        ]
        doc_files = dl_manager.download(
            [url for url in data_urls if url.endswith(".jsonl.zst")]
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"doc_files": doc_files}
            ),
        ]

    def _generate_examples(self, doc_files):
        """Yields examples in raw (text) form by iterating over all the files."""
        for doc_path in doc_files:
            logger.info("generating examples from = %s", doc_path)

            with open(doc_path, "rb") as fh:
                # Decompress the zstandard stream, decode it as UTF-8 text,
                # then parse one JSON document per line.
                dctx = zstandard.ZstdDecompressor()
                stream_reader = dctx.stream_reader(fh)
                buffered_reader = io.BufferedReader(stream_reader)
                text_stream = io.TextIOWrapper(buffered_reader, encoding="utf-8")
                for line in text_stream:
                    doc = json.loads(line)
                    yield doc["id"], {
                        "id": doc["id"],
                        "text": doc["text"],
                        "source": doc["source"],
                    }
|
|
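# A minimal usage sketch, not part of the loading script proper. It assumes
# this file is saved locally as "jomleh.py" with the "files/" directory
# (holding "checksum.sha256" and the "*.jsonl.zst" shards) next to it; the
# file name and layout are assumptions, not confirmed by this script.
if __name__ == "__main__":
    dataset = datasets.load_dataset("jomleh.py", split="train")
    print(dataset[0])  # e.g. {"id": ..., "text": "...", "source": "..."}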