import json
import os
import zstandard as zstd
import datasets
_CITATION="""\
@article{azerbayev2023llemma,
title={Llemma: an open language model for mathematics},
author={Zhangir Azerbayev and Hailey Schoelkopf and Keiran Paster and Marco Dos Santos and Stephen McAleer and Albert Q. Jiang and Jia Deng and Stella Biderman and Sean Welleck},
eprint={xyz.xyz},
archivePrefix={arXiv}
year={2023}
}
"""
_DESCRIPTION = """\
A dataset of high quality mathematical text. """
_HOMEPAGE = "https://github.com/EleutherAI/math-lm"
# hacky workaround: listing out files here for download, because removing dataloader script entirely introduced another bug
_ARXIV_FILES = {
"train": [f"arXiv_{i:03}.jsonl.zst" for i in range(100)],
"validation": [f"arXiv_{i:03}.jsonl.zst" for i in range(100)],
"test": [f"arXiv_{i:03}.jsonl.zst" for i in range(100)],
}
_OWM_FILES = {
"train": [f"shard-{i:04}.jsonl.zst" for i in range(63)],
"validation": ["val.jsonl.zst"],
"test": ["test.jsonl.zst"],
}
_ALGSTACK_FILES = {
"train": ["agda0000.jsonl.zst", "c0000.jsonl.zst"]
+ [f"cpp{i:04}.jsonl.zst" for i in range(5)]
+ [f"fortran{i:04}.jsonl.zst" for i in range(4)]
+ ["gap0000.jsonl.zst"]
+ [f"github-MATLAB-train-{i:04}.jsonl.zst" for i in range(4)]
+ [f"github-coq-train-{i:04}.jsonl.zst" for i in range(3)]
+ ["github-isabelle-train-0000.jsonl.zst", "github-lean-train-0000.jsonl.zst"]
+ ["haskell0000.jsonl.zst", "idris0000.jsonl.zst", "isa_proofsteps.jsonl.zst"]
+ [f"julia{i:04}.jsonl.zst" for i in range(6)]
+ ["jupyter-notebook0000.jsonl.zst", "lean_proofsteps.jsonl.zst", "maple0000.jsonl.zst"]
+ [f"python{i:04}.jsonl.zst" for i in range(42)]
+ ["r0000.jsonl.zst"]
+ [f"tex{i:04}.jsonl.zst" for i in range(3)],
"validation": [
"agda-validation.jsonl.zst",
"c-validation.jsonl.zst",
"cpp-validation.jsonl.zst",
"fortran-validation.jsonl.zst",
"gap-validation.jsonl.zst",
"github-MATLAB-validation-0000.jsonl.zst",
"github-coq-validation-0000.jsonl.zst",
"github-isabelle-validation-0000.jsonl.zst",
"github-lean-validation-0000.jsonl.zst",
"haskell-validation.jsonl.zst",
"idris-validation.jsonl.zst",
"isa_proofsteps.jsonl.zst",
"julia-validation.jsonl.zst",
"jupyter-notebook-validation.jsonl.zst",
"lean_proofsteps.jsonl.zst",
"maple-validation.jsonl.zst",
"python-validation.jsonl.zst",
"r-validation.jsonl.zst",
"tex-validation.jsonl.zst",
],
"test": [
"agda-test.jsonl.zst",
"c-test.jsonl.zst",
"cpp-test.jsonl.zst",
"fortran-test.jsonl.zst",
"gap-test.jsonl.zst",
"github-MATLAB-test-0000.jsonl.zst",
"github-coq-test-0000.jsonl.zst",
"github-isabelle-test-0000.jsonl.zst",
"github-lean-test-0000.jsonl.zst",
"haskell-test.jsonl.zst",
"idris-test.jsonl.zst",
"isa_proofsteps.jsonl.zst",
"julia-test.jsonl.zst",
"jupyter-notebook-test.jsonl.zst",
"lean_proofsteps.jsonl.zst",
"maple-test.jsonl.zst",
"python-test.jsonl.zst",
"r-test.jsonl.zst",
"tex-test.jsonl.zst",
]
}
# Subset name (as referenced by a config's `subsets`) -> per-split file lists.
_FILES_MAPPING = dict(
    zip(
        ("arxiv", "open-web-math", "algebraic-stack"),
        (_ARXIV_FILES, _OWM_FILES, _ALGSTACK_FILES),
    )
)
class ProofPile2Config(datasets.BuilderConfig):
    """BuilderConfig for Proof-Pile-2.

    Attributes:
        subsets: names of the subsets this configuration loads
            (keys of _FILES_MAPPING: "arxiv", "open-web-math",
            "algebraic-stack").
    """

    def __init__(self, *args, subsets, **kwargs):
        """BuilderConfig for ProofPile2.

        Args:
            subsets: list of subset names to include.
            *args: positional arguments forwarded to super.
            **kwargs: keyword arguments forwarded to super.
        """
        # Forward *args as well — the original accepted them but silently
        # dropped them on the way to BuilderConfig.__init__.
        super().__init__(*args, **kwargs)
        self.subsets = subsets
class ProofPile2(datasets.GeneratorBasedBuilder):
    """A large dataset of mathematical text."""

    VERSION = datasets.Version("1.1.0")

    # One config per subset, plus "default" which loads all three.
    BUILDER_CONFIGS = [
        ProofPile2Config(
            name="default",
            subsets=["arxiv", "open-web-math", "algebraic-stack"],
            version=VERSION,
            description="All subsets",
        ),
        ProofPile2Config(
            name="arxiv",
            subsets=["arxiv"],
            version=VERSION,
            description="ArXiv subset",
        ),
        ProofPile2Config(
            name="open-web-math",
            subsets=["open-web-math"],
            version=VERSION,
            description="OpenWebMath",
        ),
        ProofPile2Config(
            name="algebraic-stack",
            subsets=["algebraic-stack"],
            version=VERSION,
            description="Code subset",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo: each example is a text string plus a
        JSON-encoded "meta" string (see _generate_examples)."""
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "meta": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download every shard of each configured subset and return one
        SplitGenerator per split."""
        splits = (
            ("train", datasets.Split.TRAIN),
            ("validation", datasets.Split.VALIDATION),
            ("test", datasets.Split.TEST),
        )
        return [
            datasets.SplitGenerator(
                name=split_obj,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "data_files": [
                        dl_manager.download_and_extract(
                            f"https://huggingface.co/datasets/EleutherAI/proof-pile-2/resolve/main/{subset}/{split}/{x}"
                        )
                        for subset in self.config.subsets
                        for x in _FILES_MAPPING[subset][split]
                    ]
                },
            )
            for split, split_obj in splits
        ]

    def _generate_examples(self, data_files):
        """Yield (key, example) pairs from zstd-compressed JSON-lines shards.

        Lines that decode to a falsy value are skipped (but still consume a
        key, matching the original numbering). Records missing a "meta" field
        get an empty dict; meta is re-serialized to a JSON string so the
        feature schema stays a plain string.
        """
        key = 0
        for name in data_files:
            with zstd.open(open(name, "rb"), "rt", encoding="utf-8") as f:
                # Stream line by line rather than f.readlines(), which would
                # materialize an entire decompressed shard in memory.
                for line in f:
                    instance = json.loads(line)
                    if instance:
                        if "meta" not in instance:
                            instance["meta"] = dict()
                        yield key, {
                            "text": instance["text"],
                            "meta": json.dumps(instance["meta"]),
                        }
                    key += 1