soldni committed
Commit 1eb0631 • 1 Parent(s): 7d5c7f2

dolma.py CHANGED
@@ -16,38 +16,34 @@
 """Dolma: an Open Corpus of Three Trillion Tokens for Language Model Pretraining Research"""
 
 
-import json
 from pathlib import Path
-from typing import Dict, List
 
 import datasets
-import traceback
 import os
 
 logger = datasets.logging.get_logger(__name__)
 
-_CURRENT_DIR = Path(__file__).resolve().parent
 
 _DESCRIPTION = """\
 Dolma: an Open Corpus of Three Trillion Tokens for Language Model Pretraining Research
 """
 
-_SUBSET_URLS = {
-    "v1": _CURRENT_DIR / "urls/dolma-v1.txt",
-    "v1_5r1": _CURRENT_DIR / "urls/dolma-v1_5r1.txt",
-    "v1_5r1-sample": _CURRENT_DIR / "urls/dolma-v1_5r1-sample.txt",
-    "v1_5r2": _CURRENT_DIR / "urls/dolma-v1_5r2.txt",
+_URL_LISTS = {
+    "v1": "urls/v1.txt",
+    "v1_5r1": "urls/v1_5r1.txt",
+    "v1_5r1-sample": "urls/v1_5r1-sample.txt",
+    "v1_5r2": "urls/v1_5r2.txt",
 }
-_SUBSET_VERSION = {
+_VERSIONS = {
     "v1": "1.0.0",
     "v1_5r1": "1.5.0",
     "v1_5r1-sample": "1.5.0",
     "v1_5r2": "1.5.0",
 }
-_SUBSET_NAME = {
-    "v1": "Dolma v1 (Aug 2023)",
-    "v1_5r1": "Dolma v1.5r1 (Oct 2023)",
-    "v1_5r1-sample": "Dolma v1.5r1, 2T sample (Oct 2023)",
+_DATES = {
+    "v1": "(Aug 2023)",
+    "v1_5r1": "(Oct 2023)",
+    "v1_5r1-sample": "(Oct 2023)",
     "v1_5r2": "Dolma v1.5r2 (Dec 2023)",
 }
 _BASE_URL = "https://olmo-data.org"
@@ -57,40 +53,31 @@ _DATA_DIR = os.environ.get("DOLMA_DATA_DIR", None)
 _CITATION = """\
 @article{dolma,
   title = {{Dolma: An Open Corpus of Three Trillion Tokens for Language Model Pretraining Research}},
-  author = {Luca Soldaini and Rodney Kinney and Akshita Bhagia and Dustin Schwenk and David Atkinson and Russell Authur and Ben Bogin and Khyathi Chandu and Jennifer Dumas and Yanai Elazar and Valentin Hofmann and Ananya Harsh Jha and Sachin Kumar and Li Lucy and Xinxi Lyu and Ian Magnusson and Jacob Morrison and Niklas Muennighoff and Aakanksha Naik and Crystal Nam and Matthew E. Peters and Abhilasha Ravichander and Kyle Richardson and Zejiang Shen and Emma Strubell and Nishant Subramani and Oyvind Tafjord and Evan Pete Walsh and Hannaneh Hajishirzi and Noah A. Smith and Luke Zettlemoyer and Iz Beltagy and Dirk Groeneveld and Jesse Dodge and Kyle Lo},
+  author = {
+    Luca Soldaini and Rodney Kinney and Akshita Bhagia and Dustin Schwenk and David Atkinson and
+    Russell Authur and Ben Bogin and Khyathi Chandu and Jennifer Dumas and Yanai Elazar and
+    Valentin Hofmann and Ananya Harsh Jha and Sachin Kumar and Li Lucy and Xinxi Lyu and Ian Magnusson and
+    Jacob Morrison and Niklas Muennighoff and Aakanksha Naik and Crystal Nam and Matthew E. Peters and
+    Abhilasha Ravichander and Kyle Richardson and Zejiang Shen and Emma Strubell and Nishant Subramani and
+    Oyvind Tafjord and Evan Pete Walsh and Hannaneh Hajishirzi and Noah A. Smith and Luke Zettlemoyer and
+    Iz Beltagy and Dirk Groeneveld and Jesse Dodge and Kyle Lo
+  },
   year = {2024},
   journal={arXiv preprint},
 }
 """
 
 
-class RedPajama1TConfig(datasets.BuilderConfig):
-    """BuilderConfig for RedPajama sample."""
-
-    def __init__(self, *args, subsets: List[str], url_file: Path, **kwargs):
-        """BuilderConfig for RedPajama.
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(RedPajama1TConfig, self).__init__(*args, **kwargs)
-        self.subsets = subsets
-        self.url_file = url_file
-
-
-class RedPajama1T(datasets.GeneratorBasedBuilder):
+class Dolma(datasets.GeneratorBasedBuilder):
     """Dolma: an Open Corpus of Three Trillion Tokens for Language Model Pretraining Research"""
 
-    config: RedPajama1TConfig
-
     BUILDER_CONFIGS = [
-        RedPajama1TConfig(
-            name=subset,
-            subsets=[subset],
-            url_file=_SUBSET_URLS[subset],
-            version=datasets.Version(_SUBSET_VERSION[subset], _SUBSET_NAME[subset]),
-            description=_DESCRIPTION,
+        datasets.BuilderConfig(
+            name=name,
+            version=_VERSIONS[name],
+            description=f"{_DESCRIPTION} {_DATES[name]}",
         )
-        for subset in _SUBSET_URLS
+        for name in _URL_LISTS.keys()
     ]
 
     DEFAULT_CONFIG_NAME = "v1_5r2"
@@ -111,22 +98,20 @@ class RedPajama1T(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        with open(self.config.url_file, encoding="utf-8") as f:
-            subset_urls: Dict[str, List[str]] = json.load(f)
+        with open(_URL_LISTS[self.config.name], mode="rt", encoding="utf-8") as f:
+            subset_urls = f.read().splitlines()
 
         breakpoint()
 
-        url_lists: Dict[str, List[str]] = {}
-        for subset in self.config.subsets:
-            url_lists[subset] = dl_manager.download(subset_urls[subset])
+        subset_files = dl_manager.download(subset_urls)
 
         return [
            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN._name,
-                gen_kwargs={"files": {subset: url_lists[subset] for subset in self.config.subsets}},
+                name=datasets.Split.TRAIN,  # type: ignore[assignment]
+                gen_kwargs={"files": subset_files},
            )
        ]

    def _generate_examples(self, files):
        """This function returns the examples in the raw (text) form."""
-        breakpoint()
+        raise NotImplementedError("Dolma is a streaming dataset")
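
This commit leaves _generate_examples as a stub that raises NotImplementedError. For illustration only, a minimal sketch of what a completed reader on the Dolma class could look like, assuming the downloaded shards are gzip-compressed JSONL files with "id" and "text" fields (the shard format and field names are assumptions, not part of this commit):

import gzip
import json

import datasets


class Dolma(datasets.GeneratorBasedBuilder):
    ...

    def _generate_examples(self, files):
        """Yield one (key, example) pair per JSON line across all shards."""
        key = 0
        for path in files:
            # Assumption: each shard is gzip-compressed JSONL.
            with gzip.open(path, mode="rt", encoding="utf-8") as f:
                for line in f:
                    row = json.loads(line)
                    yield key, {"id": row.get("id"), "text": row.get("text")}
                    key += 1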
urls/{dolma-v1.txt β†’ v1.txt} RENAMED
File without changes
urls/{dolma-v1_5r1-sample.txt β†’ v1_5r1-sample.txt} RENAMED
File without changes
urls/{dolma-v1_5r1.txt β†’ v1_5r1.txt} RENAMED
File without changes
urls/{dolma-v1_5r2.txt β†’ v1_5r2.txt} RENAMED
File without changes
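
For completeness, a hedged sketch of how the loader would be invoked once example generation is implemented. The repository id "allenai/dolma" is an assumption (only the script itself is shown above), and the DOLMA_DATA_DIR environment variable, read into _DATA_DIR in a part of the file outside these hunks, presumably points the script at a local mirror instead of _BASE_URL:

import datasets

# Config names are the keys of _URL_LISTS; "v1_5r2" is DEFAULT_CONFIG_NAME.
dolma = datasets.load_dataset("allenai/dolma", name="v1_5r2", split="train")
print(next(iter(dolma)))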