import io
import json
import os
from glob import glob

import datasets
import zstandard as zstd
from datasets import GeneratorBasedBuilder, Version
from huggingface_hub import snapshot_download


class PileDomainDataset(GeneratorBasedBuilder):
    """Loads Pile-domain (USPTO) shards from jordiclive/uspto2 (.jsonl.zst files)."""

    VERSION = Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description="Pile Domain Dataset",
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )
    def _split_generators(self, dl_manager):
        # Earlier download paths, kept for reference:
        # snapshot_download(repo_id="Multi-Domain-Expert-Layers/uspto", repo_type="dataset")
        # dl_manager.download_and_extract("https://huggingface.co/datasets/Multi-Domain-Expert-Layers/uspto/resolve/main/uspto.tar.gz")
        dl_path = snapshot_download(repo_id="jordiclive/uspto2", repo_type="dataset")
        # One train split plus Pile/domain variants of validation and test;
        # `split` filters the files in `data_dir` by substring in the filename.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": os.path.join(dl_path, "data/train"),
                    "split": None,
                },
            ),
            datasets.SplitGenerator(
                name="validation_pile",
                gen_kwargs={
                    "data_dir": os.path.join(dl_path, "data/val"),
                    "split": "pile",
                },
            ),
            datasets.SplitGenerator(
                name="validation_domain",
                gen_kwargs={
                    "data_dir": os.path.join(dl_path, "data/val"),
                    "split": "domain",
                },
            ),
            datasets.SplitGenerator(
                name="test_pile",
                gen_kwargs={"data_dir": os.path.join(dl_path, "data/test"), "split": "pile"},
            ),
            datasets.SplitGenerator(
                name="test_domain",
                gen_kwargs={"data_dir": os.path.join(dl_path, "data/test"), "split": "domain"},
            ),
        ]
    def _generate_examples(self, data_dir, split):
        dctx = zstd.ZstdDecompressor()
        idx = -1
        file_paths = glob(os.path.join(data_dir, "*.jsonl.zst"))
        if split is not None:
            file_paths = [f for f in file_paths if split in f]
        for file in file_paths:
            with open(file, "rb") as f:
                # Stream-decompress the zstd shard and read it line by line
                # rather than materialising the whole file in memory.
                reader = dctx.stream_reader(f)
                buffer = io.BufferedReader(reader)
                for line in buffer:
                    data = json.loads(line)
                    idx += 1
                    yield idx, data
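

# Usage sketch, not part of the original file. Assumptions: this script is
# saved locally as a loading script, and the jordiclive/uspto2 repo keeps the
# data/{train,val,test}/*.jsonl.zst layout the splits above expect.
if __name__ == "__main__":
    from datasets import load_dataset

    # Streaming avoids decompressing every shard before the first example
    # (assuming the script's file access is streamable).
    ds = load_dataset(__file__, split="train", streaming=True)
    for i, example in enumerate(ds):
        print(example["text"][:80])
        if i == 2:
            break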