orionweller committed
Commit c1eb80d
Parent: 3b28030

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +47 -0
  2. train/arxiv/arxiv_0016-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  3. train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  4. train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  5. train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  6. train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  7. train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  8. train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  9. train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  10. train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  11. train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  12. train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  13. train/arxiv/arxiv_0055-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds +3 -0
  14. train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  15. train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  16. train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  17. train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  18. train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  19. train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  20. train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  21. train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds +3 -0
  22. train/arxiv/arxiv_0089-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  23. train/arxiv/arxiv_0089-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11292-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11292-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13042-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13042-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14565-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14565-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15318-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15318-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16483-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16483-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1874-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1874-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19063-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19063-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19360-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19360-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21192-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21192-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21410-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22839-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22839-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24537-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24537-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25371-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25655-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25655-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25861-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
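The shard files in this commit are MosaicML Streaming (MDS) shards of pre-tokenized, chunked text; the directory suffix (1024-512-128-backfill-nodups) appears to encode the chunking configuration. As a minimal sketch of how one of these splits might be read, assuming the mosaicml-streaming package is installed and a hypothetical local copy of the data:

    # Hedged sketch: iterate one tokenized split with mosaicml-streaming.
    # The local path is hypothetical; the column names ("id", "input_ids")
    # come from the index.json files added in this commit.
    from streaming import StreamingDataset

    ds = StreamingDataset(
        local="/data/train/arxiv/arxiv_0016-tokenized-chunked-1024-512-128-backfill-nodups",
        shuffle=False,
    )
    for sample in ds:
        print(sample["id"], len(sample["input_ids"]))
        break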
.gitattributes CHANGED
@@ -13171,3 +13171,50 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94188-tokenized-c
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94188-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_33995-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9450-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_5349-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_48050-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_41726-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21410-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32920-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_48050-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32569-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32569-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_3898-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90315-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0089-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0089-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0055-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0016-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89902-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79963-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_65152-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28378-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89902-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_95745-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_34899-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_95745-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28378-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_46817-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_34899-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91476-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_46817-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19360-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19360-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
train/arxiv/arxiv_0016-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f74e596cda5650fefef1a10ee2b7e19127e094f025d755e27ef802a09c2ba81c
+ size 67108565
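Each .mds entry in this diff is a Git LFS pointer rather than the shard itself: the repository tracks only the spec version, the SHA-256 of the actual file, and its size in bytes (roughly 64 MiB here), while the shard content lives in LFS storage. A small sketch of parsing such a pointer file, assuming a checkout where the pointer has not yet been replaced by the real file:

    # Hedged sketch: parse a Git LFS pointer file into a dict.
    # The keys ("version", "oid", "size") match the three lines above.
    def read_lfs_pointer(path):
        fields = {}
        with open(path) as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    ptr = read_lfs_pointer(
        "train/arxiv/arxiv_0016-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds"
    )
    print(ptr["oid"], int(ptr["size"]))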
train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ff11ed7477e97be1c322ff54df7cfd96b8047c865752610ea03329d464bae81
+ size 67107541
train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:165326b8748e30c303bd697bff885850b81c98d441ece7334433e2953b38f933
+ size 67107341
train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05f3afe09bdfe22c1d28c3ffb2587e51d866be7a0dd062aafa3e75c174f56d09
+ size 67107266
train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a04894c7184e43e260f90dc7a6cf6dbd6e3ec44c44f7aaff6e71073a5081e9f
+ size 67108592
train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f92f0dbdb5abd3af3684e39c35697a88b3a66e2559b428d053042af0826c9d23
+ size 67108101
train/arxiv/arxiv_0045-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7db8552060b18f3e787487e6a28be744cbbce815b2fa1884bf3d35ca8faeaac6
+ size 43875330
train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3495326a65de132566ce2837e2c359dae5ed89036239b7d6f7be6a5f884c8e66
+ size 67108848
train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe0a14990b302b188e73344894b4f5cc7c9c95bab2d012bc358e924e176a489e
+ size 67107370
train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce8e99aeb56ad5e8430478acfb82d3799844454ee5e70b2b8464b7a276384a02
+ size 67107230
train/arxiv/arxiv_0046-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ee1e95c933ef00f109fecc5051aaa48932d4821aa327612df854c1d3e671d11
+ size 67107965
train/arxiv/arxiv_0055-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b04624922fad59943b9f4d189d798a780bf53fd7b44fc6f91763a3e5939f9b5
+ size 7833820
train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7789e3a4b6793202cbe17dab7129de875c6a38cd2e01cfeb54cff0fd35df55e1
+ size 67106897
train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b4c602953282ba5a5d06f8c2ea78e572b0fabef7af682b9f776238fd7a86751
+ size 67106762
train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23bcea6c61ce0790128d379d2195b21a8ffc1d8dcc452591598f034bb48f4547
+ size 67107348
train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f104867b3d0a2f66610f916d4817609d2ed2fd684d10dcdc6e83ca5ea987b2be
+ size 67106816
train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a6625c3d18f73db38ff4481bb1cfe14e43528fc99a213f87274b8d2268a0a6c
+ size 67106907
train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2120ae083b328de4dc8efa22c034252a4cdcd096d1ac7bc92c37c62c9b1081f0
+ size 67107899
train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc01df54d5094a4dbd9162ac039cdddebb4ccbc75e722adf2bebc4baab63efc2
+ size 67108811
train/arxiv/arxiv_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32cb1e3320ec0a0e9ba28668f0be58152b54787bab9b1367452a517de6976dc6
+ size 14116118
train/arxiv/arxiv_0089-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65b5862c3648eba5553510e292081d9c630d0ad10b9f9a4cb92d640fa2b9c512
+ size 67108521
train/arxiv/arxiv_0089-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65532bd51a9c39991115739eb0cb843ee7d280b04744292ae82d4369efe30013
+ size 67107947
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11292-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107212, "hashes": {}}, "samples": 43410, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47551286, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17416794, "hashes": {}}, "samples": 11158, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12322948, "hashes": {}}}], "version": 2}
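Each split's index.json lists its MDS shards with the column schema ("id" as str, "input_ids" as ndarray:uint16), zstd compression, raw and compressed byte counts, and per-shard sample counts. A hedged sketch for totalling samples in a split from this index, assuming a hypothetical local path:

    # Hedged sketch: sum samples across a split's shards from its MDS index.
    # The path is hypothetical; the schema matches the index.json shown above.
    import json

    with open("split_11292-tokenized-chunked-1024-512-128-backfill-nodups/index.json") as f:
        index = json.load(f)

    total_samples = sum(shard["samples"] for shard in index["shards"])
    print(total_samples)  # 43410 + 11158 = 54568 for split_11292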
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11292-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40573118,
+ "num_truncated_tokens": 40538833
+ }
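For scale, split_11292's two token counts differ by 34,285 tokens (under 0.1%), and spread over the 54,568 samples recorded in its index.json this works out to roughly 744 tokens per chunked sample, consistent with the 1024-token chunk size suggested by the directory names.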
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13042-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107264, "hashes": {}}, "samples": 44029, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47566929, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11594386, "hashes": {}}, "samples": 7571, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8305167, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13042-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37753804,
+ "num_truncated_tokens": 37724917
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14565-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108853, "hashes": {}}, "samples": 44857, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47137346, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7720452, "hashes": {}}, "samples": 5162, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5400141, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14565-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 35866851,
+ "num_truncated_tokens": 35840239
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15318-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108231, "hashes": {}}, "samples": 42984, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47741524, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19692292, "hashes": {}}, "samples": 12459, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13934905, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15318-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41684075,
+ "num_truncated_tokens": 41648810
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16483-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107270, "hashes": {}}, "samples": 43080, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47556895, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18727741, "hashes": {}}, "samples": 11991, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13248900, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16483-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41212877,
+ "num_truncated_tokens": 41177620
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1874-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108382, "hashes": {}}, "samples": 42638, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47726051, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21834757, "hashes": {}}, "samples": 13887, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15548184, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1874-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42721866,
+ "num_truncated_tokens": 42684532
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19063-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108462, "hashes": {}}, "samples": 44406, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47828796, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8825120, "hashes": {}}, "samples": 5847, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6294874, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19063-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36411497,
+ "num_truncated_tokens": 36384664
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19360-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e46511c73a5ab8bae65ae6f5a0443b303da696970d91d569503c0bd6d82ae95
+ size 67108398
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19360-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:681c9b95c9ba54ed6cab6f19f10ce4ff28feda6314f9499e5037f8cf37b89442
+ size 20830517
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21192-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107914, "hashes": {}}, "samples": 42641, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47814990, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22266971, "hashes": {}}, "samples": 14004, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15734797, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21192-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42933961,
+ "num_truncated_tokens": 42895942
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21410-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60b3dd431c7aec2cc5aa37049534f7d3ea04899f1d26e0acaa7e2d6c267b291a
+ size 67108105
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22839-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107004, "hashes": {}}, "samples": 43968, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47768234, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11217314, "hashes": {}}, "samples": 7373, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7964466, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22839-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37573136,
+ "num_truncated_tokens": 37543892
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24537-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108252, "hashes": {}}, "samples": 44579, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47805515, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8478264, "hashes": {}}, "samples": 5583, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6088947, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24537-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36240840,
+ "num_truncated_tokens": 36214195
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25371-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38669674,
+ "num_truncated_tokens": 38638712
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25655-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107915, "hashes": {}}, "samples": 43947, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47698707, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13123180, "hashes": {}}, "samples": 8677, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9309241, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25655-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38487041,
+ "num_truncated_tokens": 38455846
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25861-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106863, "hashes": {}}, "samples": 43359, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47941022, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16019826, "hashes": {}}, "samples": 10367, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11469797, "hashes": {}}}], "version": 2}