aluncstokes committed
Commit 603eb40
1 Parent(s): 7c42e30

Remove loading script, update README with yaml

Files changed (2)
  1. README.md +10 -0
  2. mathpile_arxiv_subset_tiny.py +0 -126
README.md CHANGED
@@ -1,3 +1,13 @@
+ ---
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: "train_chunked.jsonl"
+   - split: test
+     path: "test_chunked.jsonl"
+ ---
+
  # MathPile ArXiv (subset)

  ## Description
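With the YAML `configs` block above, the Hub's built-in JSONL loader resolves both splits directly from `train_chunked.jsonl` and `test_chunked.jsonl`, which is what makes the loading script below removable. A minimal sketch of loading the dataset after this commit (the repository id is taken from the removed script's `_HOMEPAGE`; the column names are assumptions based on its feature definition):

```python
from datasets import load_dataset

# The "configs" front-matter maps each split to its JSONL file,
# so no custom builder script is required any more.
ds = load_dataset("aluncstokes/mathpile_arxiv_subset_tiny")

print(ds)  # DatasetDict with "train" and "test" splits
# Assumed columns, based on the removed _info() features:
# set, id, chunk_text, chunk_num_tokens, document_num_tokens, document_language
print(ds["train"][0]["chunk_text"][:200])
```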
mathpile_arxiv_subset_tiny.py DELETED
@@ -1,126 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """This dataset consists of a toy subset of 8834 (5000 training + 3834 testing) TeX files found in the arXiv subset of MathPile, used for testing. Each document is split using LaTeX-specific characters for recursive character text splitting with ~4k token window and ~1.5k token overlaps. You should not use this dataset. Training and testing sets are already split."""
-
- import json
- import os
-
- import datasets
-
- _CITATION = """\
- @article{wang2023mathpile,
-   title={Generative AI for Math: Part I -- MathPile: A Billion-Token-Scale Pretraining Corpus for Math},
-   author={Wang, Zengzhi and Xia, Rui and Liu, Pengfei},
-   journal={arXiv preprint arXiv:2312.17120},
-   year={2023}
- }
- """
-
- _DESCRIPTION = """\
- This dataset consists of a toy subset of 8834 (5000 training + 3834 testing) TeX files found in the arXiv subset of MathPile, used for testing. Each document is split using LaTeX-specific characters for recursive character text splitting with ~4k token window and ~1.5k token overlaps. You should not use this dataset.
- """
-
- _HOMEPAGE = "https://huggingface.co/datasets/aluncstokes/mathpile_arxiv_subset_tiny"
-
-
- _LICENSE = "CC BY-NC-SA 4.0"
-
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLS = {"first_domain": "https://huggingface.co/datasets/GAIR/MathPile"}
-
-
- class MathpileArxivSubsetTiny(datasets.GeneratorBasedBuilder):
-     """This dataset consists of a toy subset of 8834 (5000 training + 3834 testing) TeX files found in the arXiv subset of MathPile, used for testing. Each document is split using LaTeX-specific characters for recursive character text splitting with ~4k token window and ~1.5k token overlaps. You should not use this dataset"""
-
-     VERSION = datasets.Version("0.2")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-
-     DEFAULT_CONFIG_NAME = ""  # It's not mandatory to have a default configuration. Just use one if it make sense.
-
-     def _info(self):
-         if self.config.name == "":
-             features = datasets.Features(
-                 {
-                     "set": datasets.Value("string"),
-                     "id": datasets.Value("string"),
-                     "chunk_text": datasets.Value("long_string"),
-                     "chunk_num_tokens": datasets.Value("uint32"),
-                     "document_num_tokens": datasets.Value("uint32"),
-                     "document_language": datasets.Value("string"),
-                 }
-             )
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         urls = _URLS[self.config.name]
-         data_dir = dl_manager.download_and_extract(urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "train_chunked.jsonl"),
-                     "split": "train",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "test_chunked.jsonl"),
-                     "split": "test",
-                 },
-             ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def generate_examples(self, filepath, split):
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         with open(filepath, encoding="utf-8") as f:
-             for key, row in enumerate(f):
-                 data = json.loads(row)
-                 # Yields examples as (key, example) tuples
-                 yield key, {"text": data["chunk_text"]}
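For anyone reproducing the removed builder's behaviour outside of `datasets`, its example generator amounts to streaming one JSON object per line from the chunked files. A small sketch under that assumption (the file names are the ones referenced in the new YAML configs; the `chunk_text` field comes from the deleted code above):

```python
import json

def iter_chunks(filepath):
    """Yield (key, example) pairs the way the removed generator did."""
    with open(filepath, encoding="utf-8") as f:
        for key, row in enumerate(f):
            data = json.loads(row)
            # The removed script exposed only the chunk text, keyed as "text".
            yield key, {"text": data["chunk_text"]}

# Usage with the files listed in the README front-matter:
# for key, example in iter_chunks("train_chunked.jsonl"):
#     ...
```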